@article{46394,
  abstract     = {{Meta-heuristics are frequently used to tackle NP-hard combinatorial optimization problems. With this paper we contribute to the understanding of the success of 2-opt based local search algorithms for solving the traveling salesperson problem (TSP). Although 2-opt is widely used in practice, it is hard to understand its success from a theoretical perspective. We take a statistical approach and examine the features of TSP instances that make the problem either hard or easy to solve. As a measure of problem difficulty for 2-opt we use the approximation ratio that it achieves on a given instance. Our investigations point out important features that make TSP instances hard or easy to be approximated by 2-opt.}},
  author       = {{Mersmann, O. and Bischl, B. and Trautmann, Heike and Wagner, M. and Bossek, Jakob and Neumann, F.}},
  journal      = {{Annals of Mathematics and Artificial Intelligence}},
  pages        = {{151--182}},
  title        = {{{A Novel Feature-Based Approach to Characterize Algorithm Performance for the Traveling Salesman Problem}}},
  volume       = {{69}},
  year         = {{2013}},
}

@inproceedings{36919,
  abstract     = {{Faced with increasing demands on energy efficiency, current electronic systems operate according to complex power management schemes including more and more fine-grained voltage frequency scaling and power shutdown scenarios. Consequently, validation of the power design intent should begin as early as possible at electronic system-level (ESL) together with first executable system specifications for integrity tests. However, today's system-level design methodologies usually focus on the abstraction of digital logic and time, so that typical low-power aspects cannot be considered so far. In this paper, we present a high-level modeling approach on top of the SystemC/TLM standard to simulate power distribution and voltage based implications in a "loosely-timed" functional execution context. The approach reuses legacy TLM models and prevents the need for detailed lock-step process synchronization in contrast to existing methods. A case study derived from an open source low-power design demonstrates the efficiency of our approach in terms of simulation performance and testability.}},
  author       = {{Mischkalla, Fabian and Müller, Wolfgang}},
  keywords     = {{Time-varying systems, Time-domain analysis, Synchronization, Context modeling, Clocks, Semantics, Standards}},
  publisher    = {{IEEE}},
  title        = {{{Efficient Power-Intent Validation Using "Loosely-Timed" Simulation Models: A Non-Invasive Approach}}},
  doi          = {{10.1109/PATMOS.2013.6662171}},
  year         = {{2013}},
}

@inproceedings{36920,
  abstract     = {{In the electronic system development, energy consumption is clearly becoming one of the most important design concerns. From the system level point of view, Dynamic Power Management (DPM) and Dynamic Voltage and Frequency Scaling (DVFS) are two mostly applied techniques to adjust the tradeoff between the performance and power dissipation at runtime. In this paper, we study the problem of combined application of both techniques with regard to hard real-time systems running on cluster-based multi-core processors. To optimize the processor energy consumption, a heuristic based on simulated annealing with efficient termination criterion is proposed. The experiment results show that the proposed algorithm outperforms the existing approaches in terms of the energy reduction. }},
  author       = {{He, Da and Müller, Wolfgang}},
  booktitle    = {{Proceedings of the International Conference on Applied Computing (AC)}},
  editor       = {{Weghorn, Hans}},
  isbn         = {{978-989-8533-20-3}},
  keywords     = {{Dynamic Power Management, Dynamic Voltage and Frequency Scaling, Hard Real-Time, Multi-core Processor}},
  title        = {{{An Energy-Efficient Heuristic for Hard Real-Time System on Multi-Core Processors}}},
  year         = {{2013}},
}

@phdthesis{8425,
  abstract     = {{This thesis studies three topics in quantum computation and information: The approximability of quantum problems, quantum proof systems, and non-classical correlations in quantum systems. 

In the first area, we demonstrate a polynomial-time (classical) approximation algorithm for dense instances of the canonical QMA-complete quantum constraint satisfaction problem, the local Hamiltonian problem. In the opposite direction, we next introduce a quantum generalization of the polynomial-time hierarchy, and define problems which we prove are not only complete for the second level of this hierarchy, but are in fact hard to approximate. 

In the second area, we study variants of the interesting and stubbornly open question of whether a quantum proof system with multiple unentangled quantum provers is equal in expressive power to a proof system with a single quantum prover. Our results concern classes such as BellQMA(poly), and include a novel proof of perfect parallel repetition for SepQMA(m) based on cone programming duality. 

In the third area, we study non-classical quantum correlations beyond entanglement, often dubbed "non-classicality". Among our results are two novel schemes for quantifying non-classicality: The first proposes the new paradigm of exploiting local unitary operations to study non-classical correlations, and the second introduces a protocol through which non-classical correlations in a starting system can be "activated" into distillable entanglement with an ancilla system. 

An introduction to all required linear algebra and quantum mechanics is included.}},
  author       = {{Gharibian, Sevag}},
  pages        = {{240}},
  title        = {{{Approximation, Proof Systems, and Correlations in a Quantum World}}},
  year         = {{2013}},
}

@article{8173,
  abstract     = {{We study three variants of multi-prover quantum Merlin-Arthur proof systems. We first show that the class of problems that can be efficiently verified using polynomially many quantum proofs, each of logarithmic-size, is exactly MQA (also known as QCMA), the class of problems which can be efficiently verified via a classical proof and a quantum verifier. We then study the class BellQMA(poly), characterized by a verifier who first applies unentangled, nonadaptive measurements to each of the polynomially many proofs, followed by an arbitrary but efficient quantum verification circuit on the resulting measurement outcomes. We show that if the number of outcomes per nonadaptive measurement is a polynomially-bounded function, then the expressive power of the proof system is exactly QMA. Finally, we study a class equivalent to QMA(m), denoted SepQMA(m), where the verifier's measurement operator corresponding to outcome "accept" is a fully separable operator across the m quantum proofs. Using cone programming duality, we give an alternate proof of a result of Harrow and Montanaro [FOCS, pp. 633--642 (2010)] that shows a perfect parallel repetition theorem for SepQMA(m) for any m.}},
  author       = {{Gharibian, Sevag and Sikora, Jamie and Upadhyay, Sarvagya}},
  journal      = {{Quantum Information \& Computation}},
  number       = {{1-2}},
  pages        = {{135--157}},
  title        = {{{QMA variants with polynomially many provers}}},
  volume       = {{13}},
  year         = {{2013}},
}

@inproceedings{528,
  abstract     = {{Cold-boot attacks exploit the fact that DRAM contents are not immediately lost when a PC is powered off. Instead the contents decay rather slowly, in particular if the DRAM chips are cooled to low temperatures. This effect opens an attack vector on cryptographic applications that keep decrypted keys in DRAM. An attacker with access to the target computer can reboot it or remove the RAM modules and quickly copy the RAM contents to non-volatile memory. By exploiting the known cryptographic structure of the cipher and layout of the key data in memory, in our application an AES key schedule with redundancy, the resulting memory image can be searched for sections that could correspond to decayed cryptographic keys; then, the attacker can attempt to reconstruct the original key. However, the runtime of these algorithms grows rapidly with increasing memory image size, error rate and complexity of the bit error model, which limits the practicability of the approach. In this work, we study how the algorithm for key search can be accelerated with custom computing machines. We present an FPGA-based architecture on a Maxeler dataflow computing system that outperforms a software implementation up to 205x, which significantly improves the practicability of cold-attacks against AES.}},
  author       = {{Riebler, Heinrich and Kenter, Tobias and Sorge, Christoph and Plessl, Christian}},
  booktitle    = {{Proceedings of the International Conference on Field-Programmable Technology (FPT)}},
  keywords     = {{coldboot}},
  pages        = {{386--389}},
  publisher    = {{IEEE}},
  title        = {{{FPGA-accelerated Key Search for Cold-Boot Attacks against AES}}},
  doi          = {{10.1109/FPT.2013.6718394}},
  year         = {{2013}},
}

@inproceedings{505,
  abstract     = {{In this paper we introduce “On-The-Fly Computing”, our vision of future IT services that will be provided by assembling modular software components available on world-wide markets. After suitable components have been found, they are automatically integrated, configured and brought to execution in an On-The-Fly Compute Center. We envision that these future compute centers will continue to leverage three current trends in large scale computing which are an increasing amount of parallel processing, a trend to use heterogeneous computing resources, and—in the light of rising energy cost—energy-efficiency as a primary goal in the design and operation of computing systems. In this paper, we point out three research challenges and our current work in these areas.}},
  author       = {{Happe, Markus and Kling, Peter and Plessl, Christian and Platzner, Marco and Meyer auf der Heide, Friedhelm}},
  booktitle    = {{Proceedings of the 9th IEEE Workshop on Software Technology for Future embedded and Ubiquitous Systems (SEUS)}},
  publisher    = {{IEEE}},
  title        = {{{On-The-Fly Computing: A Novel Paradigm for Individualized IT Services}}},
  doi          = {{10.1109/ISORC.2013.6913232}},
  year         = {{2013}},
}

@inproceedings{1787,
  author       = {{Suess, Tim and Schoenrock, Andrew and Meisner, Sebastian and Plessl, Christian}},
  booktitle    = {{Proc. Int. Symp. on Parallel and Distributed Processing Workshops (IPDPSW)}},
  isbn         = {{978-0-7695-4979-8}},
  pages        = {{64--73}},
  publisher    = {{IEEE Computer Society}},
  title        = {{{Parallel Macro Pipelining on the Intel SCC Many-Core Computer}}},
  doi          = {{10.1109/IPDPSW.2013.136}},
  year         = {{2013}},
}

@article{15180,
  author       = {{Owen, G. Scott and Domik, Gitta and Ebert, David S. and Kohlhammer, Jörn and Rushmeier, Holly and Santos, Beatriz Sousa and Weiskopf, Daniel}},
  journal      = {{IEEE Computer Graphics and Applications}},
  number       = {{4}},
  pages        = {{14--19}},
  publisher    = {{IEEE}},
  title        = {{{How visualization courses have changed over the past 10 years}}},
  doi          = {{10.1109/MCG.2013.57}},
  volume       = {{33}},
  year         = {{2013}},
}

@article{60453,
  abstract     = {{<jats:p>
            The most popular and actively researched class of quad remeshing techniques is the family of
            <jats:italic>parametrization based quad meshing methods</jats:italic>
            . They all strive to generate an
            <jats:italic>integer-grid map</jats:italic>
            , i.e. a parametrization of the input surface into R
            <jats:sup>2</jats:sup>
            such that the canonical grid of integer iso-lines forms a quad mesh when mapped back onto the surface in R
            <jats:sup>3</jats:sup>
            . An essential, albeit broadly neglected aspect of these methods is the
            <jats:italic>quad extraction</jats:italic>
            step, i.e. the materialization of an actual quad mesh from the mere "quad texture". Quad (mesh) extraction is often believed to be a trivial matter but quite the opposite is true: numerous special cases, ambiguities induced by numerical inaccuracies and limited solver precision, as well as imperfections in the maps produced by most methods (unless costly countermeasures are taken) pose significant challenges to the quad extractor. We present a method to sanitize a provided parametrization such that it becomes numerically consistent even in a limited precision floating point representation. Based on this we are able to provide a comprehensive and sound description of how to perform quad extraction robustly and without the need for any complex tolerance thresholds or disambiguation rules. On top of that we develop a novel strategy to cope with common local fold-overs in the parametrization. This allows our method, dubbed
            <jats:italic>QEx</jats:italic>
            , to generate all-quadrilateral meshes where otherwise holes, non-quad polygons or no output at all would have been produced. We thus enable the practical use of an entire class of maps that was previously considered defective. Since state of the art quad meshing methods spend a significant share of their run time solely to prevent local fold-overs, using our method it is now possible to obtain quad meshes significantly quicker than before. We also provide libQEx, an open source C++ reference implementation of our method and thus significantly lower the bar to enter the field of quad meshing.
          </jats:p>}},
  author       = {{Ebke, Hans-Christian and Bommes, David and Campen, Marcel and Kobbelt, Leif}},
  issn         = {{0730-0301}},
  journal      = {{ACM Transactions on Graphics}},
  number       = {{6}},
  pages        = {{1--10}},
  publisher    = {{Association for Computing Machinery (ACM)}},
  title        = {{{QEx}}},
  doi          = {{10.1145/2508363.2508372}},
  volume       = {{32}},
  year         = {{2013}},
}

@article{60452,
  abstract     = {{<jats:p>Quadrilateral remeshing approaches based on global parametrization enable many desirable mesh properties. Two of the most important ones are (1) high regularity due to explicit control over irregular vertices and (2) smooth distribution of distortion achieved by convex variational formulations. Apart from these strengths, state-of-the-art techniques suffer from limited reliability on real-world input data, i.e. the determined map might have degeneracies like (local) non-injectivities and consequently often cannot be used directly to generate a quadrilateral mesh. In this paper we propose a novel convex Mixed-Integer Quadratic Programming (MIQP) formulation which ensures by construction that the resulting map is within the class of so called Integer-Grid Maps that are guaranteed to imply a quad mesh. In order to overcome the NP-hardness of MIQP and to be able to remesh typical input geometries in acceptable time we propose two additional problem specific optimizations: a complexity reduction algorithm and singularity separating conditions. While the former decouples the dimension of the MIQP search space from the input complexity of the triangle mesh and thus is able to dramatically speed up the computation without inducing inaccuracies, the latter improves the continuous relaxation, which is crucial for the success of modern MIQP optimizers. Our experiments show that the reliability of the resulting algorithm does not only annihilate the main drawback of parametrization based quad-remeshing but moreover enables the global search for high-quality coarse quad layouts - a difficult task solely tackled by greedy methodologies before.</jats:p>}},
  author       = {{Bommes, David and Campen, Marcel and Ebke, Hans-Christian and Alliez, Pierre and Kobbelt, Leif}},
  issn         = {{0730-0301}},
  journal      = {{ACM Transactions on Graphics}},
  number       = {{4}},
  pages        = {{1--12}},
  publisher    = {{Association for Computing Machinery (ACM)}},
  title        = {{{Integer-grid maps for reliable quad meshing}}},
  doi          = {{10.1145/2461912.2462014}},
  volume       = {{32}},
  year         = {{2013}},
}

@article{60451,
  abstract     = {{<jats:p>Nowadays, digital 3D models are in widespread and ubiquitous use, and each specific application dealing with 3D geometry has its own quality requirements that restrict the class of acceptable and supported models. This article analyzes typical defects that make a 3D model unsuitable for key application contexts, and surveys existing algorithms that process, repair, and improve its structure, geometry, and topology to make it appropriate to case-by-case requirements.</jats:p>
          <jats:p>The analysis is focused on polygon meshes, which constitute by far the most common 3D object representation. In particular, this article provides a structured overview of mesh repairing techniques from the point of view of the application context. Different types of mesh defects are classified according to the upstream application that produced the mesh, whereas mesh quality requirements are grouped by representative sets of downstream applications where the mesh is to be used. The numerous mesh repair methods that have been proposed during the last two decades are analyzed and classified in terms of their capabilities, properties, and guarantees. Based on these classifications, guidelines can be derived to support the identification of repairing algorithms best-suited to bridge the compatibility gap between the quality provided by the upstream process and the quality required by the downstream applications in a given geometry processing scenario.</jats:p>}},
  author       = {{Attene, Marco and Campen, Marcel and Kobbelt, Leif}},
  issn         = {{0360-0300}},
  journal      = {{ACM Computing Surveys}},
  number       = {{2}},
  pages        = {{1--33}},
  publisher    = {{Association for Computing Machinery (ACM)}},
  title        = {{{Polygon mesh repairing}}},
  doi          = {{10.1145/2431211.2431214}},
  volume       = {{45}},
  year         = {{2013}},
}

@article{60450,
  abstract     = {{<jats:title>Abstract</jats:title><jats:p>The computation of intrinsic, geodesic distances and geodesic paths on surfaces is a fundamental low‐level building block in countless Computer Graphics and Geometry Processing applications. This demand led to the development of numerous algorithms – some for the exact, others for the approximative computation, some focussing on speed, others providing strict guarantees. Most of these methods are designed for computing distances according to the standard Riemannian metric induced by the surface's embedding in Euclidean space. Generalization to other, especially anisotropic, metrics – which more recently gained interest in several application areas – is not rarely hampered by fundamental problems. We explore and discuss possibilities for the generalization and extension of well‐known methods to the anisotropic case, evaluate their relative performance in terms of accuracy and speed, and propose a novel algorithm, the <jats:italic>Short‐Term Vector Dijkstra</jats:italic>. This algorithm is strikingly simple to implement and proves to provide practical accuracy at a higher speed than generalized previous methods.</jats:p>}},
  author       = {{Campen, Marcel and Heistermann, Martin and Kobbelt, Leif}},
  issn         = {{0167-7055}},
  journal      = {{Computer Graphics Forum}},
  number       = {{5}},
  pages        = {{63--71}},
  publisher    = {{Wiley}},
  title        = {{{Practical Anisotropic Geodesy}}},
  doi          = {{10.1111/cgf.12173}},
  volume       = {{32}},
  year         = {{2013}},
}

@inproceedings{60454,
  author       = {{Zimmer, Henrik and Campen, Marcel and Kobbelt, Leif}},
  booktitle    = {{2013 IEEE Conference on Computer Vision and Pattern Recognition}},
  publisher    = {{IEEE}},
  title        = {{{Efficient Computation of Shortest Path-Concavity for 3D Meshes}}},
  doi          = {{10.1109/cvpr.2013.280}},
  year         = {{2013}},
}

@techreport{2504,
  author       = {{Khan, Rana Azeem M. and Karl, Holger}},
  title        = {{{Simulating Cooperative Diversity Protocols for Multi-hop Wireless and Sensor Networks}}},
  year         = {{2012}},
}

@techreport{2505,
  author       = {{Dannewitz, Christian and Karl, Holger and Yadav, Aditya}},
  title        = {{{Report on Locality in DNS Requests -- Evaluation and Impact on Future Internet Architectures}}},
  year         = {{2012}},
}

@inproceedings{20173,
  abstract     = {{This paper investigates the properties required to evolve Artificial Neural Networks for distributed control in modular robotics, which typically involves non-linear dynamics and complex interactions in the sensori-motor space. We investigate the relation between macro-scale properties (such as modularity and regularity) and micro-scale properties in Neural Network controllers. We show how neurons capable of multiplicative-like arithmetic operations may increase the performance of controllers in several ways whenever challenging control problems with non-linear dynamics are involved. This paper provides evidence that performance and robustness of evolved controllers can be improved by a combination of carefully chosen micro- and macro-scale neural network properties.}},
  author       = {{Hamann, Heiko and Stradner, Jürgen and Bredeche, Nicolas and Cazenille, Leo}},
  booktitle    = {{14th Annual Genetic and Evolutionary Computation Conference, GECCO 2012}},
  pages        = {{89--96}},
  publisher    = {{ACM}},
  title        = {{{Impact of Neuron Models and Network Structure on Evolving Modular Robot Neural Network Controllers}}},
  doi          = {{10.1145/2330163.2330177}},
  year         = {{2012}},
}

@inproceedings{20174,
  abstract     = {{As a contribution to the efforts towards robotic systems of higher flexibility we present our concept of morphologically dynamic robots. Within the projects SYMBRION and REPLICATOR, that focus on modular robotics, we have developed bio-inspired control techniques to achieve new concepts of dynamic, autonomous morphological structures. We propose three modes of coupling between robot modules: swarm, team, and organism mode. We demonstrate our concepts along with simple robot experiments.}},
  author       = {{Hamann, Heiko and Schmickl, Thomas and Stradner, Jürgen}},
  booktitle    = {{Austrian Robotics Workshop (Operational Programme Slovenia-Austria)}},
  title        = {{{Towards Morphological Flexibility: Modular Robotics and Bio-inspired Control}}},
  year         = {{2012}},
}

@inproceedings{20175,
  author       = {{Hamann, Heiko and Schmickl, Thomas and Stradner, Jürgen and Crailsheim, Karl and Zahadat, Payam and Adami, Christoph and Bryson, David M. and Ofria, Charles and Pennock, Robert T.}},
  booktitle    = {{Alife XIII}},
  pages        = {{597--598}},
  publisher    = {{MIT Press}},
  title        = {{{On-line, On-board Evolution of Reaction-Diffusion Control for Self-Adaptation}}},
  year         = {{2012}},
}

@article{20176,
  author       = {{Hamann, Heiko and Schmickl, Thomas and Crailsheim, Karl}},
  issn         = {{1387-3954}},
  journal      = {{Mathematical and Computer Modelling of Dynamical Systems}},
  number       = {{1}},
  pages        = {{39--50}},
  title        = {{{Self-organized pattern formation in a swarm system as a transient phenomenon of non-linear dynamics}}},
  doi          = {{10.1080/13873954.2011.601418}},
  volume       = {{18}},
  year         = {{2012}},
}

