% GECCO 2015 conference paper. Fixed: Unicode en-dash in pages -> "--",
% curly apostrophe in booktitle -> ASCII ', Unicode o-umlaut -> {\"o} for
% classic-BibTeX sorting compatibility (matches escape style used elsewhere in this file).
@inproceedings{46377,
  abstract     = {{We evaluate the performance of a multi-objective evolutionary algorithm on a class of dynamic routing problems with a single vehicle. In particular we focus on relating algorithmic performance to the most prominent characteristics of problem instances. The routing problem considers two types of customers: mandatory customers must be visited whereas optional customers do not necessarily have to be visited. Moreover, mandatory customers are known prior to the start of the tour whereas optional customers request for service at later points in time with the vehicle already being on its way. The multi-objective optimization problem then results as maximizing the number of visited customers while simultaneously minimizing total travel time. As an a-posteriori evaluation tool, the evolutionary algorithm aims at approximating the related Pareto set for specifically designed benchmarking instances differing in terms of number of customers, geographical layout, fraction of mandatory customers, and request times of optional customers. Conceptional and experimental comparisons to online heuristic procedures are provided.}},
  author       = {{Meisel, Stephan and Grimme, Christian and Bossek, Jakob and W{\"o}lck, Martin and Rudolph, Guenter and Trautmann, Heike}},
  booktitle    = {{Proceedings of the Genetic and Evolutionary Computation Conference (GECCO '15)}},
  isbn         = {{978-1-4503-3472-3}},
  pages        = {{425--432}},
  title        = {{{Evaluation of a Multi-Objective EA on Benchmark Instances for Dynamic Routing of a Vehicle}}},
  doi          = {{10.1145/2739480.2754705}},
  year         = {{2015}},
}

% ICALP 2015 conference paper. Fields are complete for @inproceedings.
% NOTE(review): ICALP proceedings typically appear as a Lecture Notes in
% Computer Science volume; series/volume fields could be added -- confirm
% the LNCS volume number before doing so.
@inproceedings{8164,
  abstract     = {{The study of ground state energies of local Hamiltonians has played a fundamental role in quantum complexity theory. In this paper, we take a new direction by introducing the physically motivated notion of ``ground state connectivity'' of local Hamiltonians, which captures problems in areas ranging from quantum stabilizer codes to quantum memories. We show that determining how ``connected'' the ground space of a local Hamiltonian is can range from QCMA-complete to PSPACE-complete, as well as NEXP-complete for an appropriately defined ``succinct'' version of the problem. As a result, we obtain a natural QCMA-complete problem, a goal which has generally proven difficult since the conception of QCMA over a decade ago. Our proofs rely on a new technical tool, the Traversal Lemma, which analyzes the Hilbert space a local unitary evolution must traverse under certain conditions. We show that this lemma is essentially tight with respect to the length of the unitary evolution in question.}},
  author       = {{Gharibian, Sevag and Sikora, Jamie}},
  booktitle    = {{International Colloquium on Automata, Languages, and Programming (ICALP 2015)}},
  editor       = {{Halld{\'o}rsson, Magn{\'u}s M. and Iwama, Kazuo and Kobayashi, Naoki and Speckmann, Bettina}},
  isbn         = {{978-3-662-47672-7}},
  location     = {{Kyoto, Japan}},
  pages        = {{617--628}},
  publisher    = {{Springer Berlin Heidelberg}},
  title        = {{{Ground State Connectivity of Local Hamiltonians}}},
  doi          = {{10.1007/978-3-662-47672-7_50}},
  year         = {{2015}},
}

% Journal survey article. Fixed: raw Unicode registered-trademark sign in the
% journal name -> {\textregistered}, and Unicode curly quotes/apostrophe in the
% abstract -> classic TeX ``...'' and ' (8-bit BibTeX cannot handle raw Unicode reliably).
@article{8166,
  abstract     = {{Constraint satisfaction problems are a central pillar of modern computational complexity theory. This survey provides an introduction to the rapidly growing field of Quantum Hamiltonian Complexity, which includes the study of quantum constraint satisfaction problems. Over the past decade and a half, this field has witnessed fundamental breakthroughs, ranging from the establishment of a ``Quantum Cook-Levin Theorem'' to deep insights into the structure of 1D low-temperature quantum systems via so-called area laws. Our aim here is to provide a computer science-oriented introduction to the subject in order to help bridge the language barrier between computer scientists and physicists in the field. As such, we include the following in this survey: (1) The motivations and history of the field, (2) a glossary of condensed matter physics terms explained in computer-science friendly language, (3) overviews of central ideas from condensed matter physics, such as indistinguishable particles, mean field theory, tensor networks, and area laws, and (4) brief expositions of selected computer science-based results in the area. For example, as part of the latter, we provide a novel information theoretic presentation of Bravyi's polynomial time algorithm for Quantum 2-SAT.}},
  author       = {{Gharibian, Sevag and Huang, Yichen and Landau, Zeph and Woo Shin, Seung}},
  issn         = {{1551-305X}},
  journal      = {{Foundations and Trends{\textregistered} in Theoretical Computer Science}},
  number       = {{3}},
  pages        = {{159--282}},
  title        = {{{Quantum Hamiltonian Complexity}}},
  doi          = {{10.1561/0400000066}},
  volume       = {{10}},
  year         = {{2015}},
}

% Journal article. Fixed: unescaped & in the journal name (an ampersand is a
% LaTeX special character and must be written \&), normalized the number field
% to 9\&10 for the combined issue, and removed a stray trailing newline inside
% the abstract braces.
@article{8168,
  abstract     = {{Tensor networks are a central tool in condensed matter physics. In this paper, we initiate the study of tensor network non-zero testing (TNZ): Given a tensor network T, does T represent a non-zero vector? We show that TNZ is not in the Polynomial-Time Hierarchy unless the hierarchy collapses. We next show (among other results) that the special cases of TNZ on non-negative and injective tensor networks are in NP. Using this, we make a simple observation: The commuting variant of the MA-complete stoquastic k-SAT problem on D-dimensional qudits is in NP for logarithmic k and constant D. This reveals the first class of quantum Hamiltonians whose commuting variant is known to be in NP for all (1) logarithmic k, (2) constant D, and (3) for arbitrary interaction graphs.}},
  author       = {{Gharibian, Sevag and Landau, Zeph and Woo Shin, Seung and Wang, Guoming}},
  journal      = {{Quantum Information \& Computation}},
  number       = {{9\&10}},
  pages        = {{885--899}},
  title        = {{{Tensor network non-zero testing}}},
  volume       = {{15}},
  year         = {{2015}},
}

% Journal article (IJRC uses year-as-volume numbering, hence volume = 2015).
% NOTE(review): no pages/article-number field; the DOI suffix (2015/859425)
% suggests article ID 859425 -- confirm against the publisher record before adding.
@article{296,
  abstract     = {{FPGAs are known to permit huge gains in performance and efficiency for suitable applications but still require reduced design efforts and shorter development cycles for wider adoption. In this work, we compare the resulting performance of two design concepts that in different ways promise such increased productivity. As common starting point, we employ a kernel-centric design approach, where computational hotspots in an application are identified and individually accelerated on FPGA. By means of a complex stereo matching application, we evaluate two fundamentally different design philosophies and approaches for implementing the required kernels on FPGAs. In the first implementation approach, we designed individually specialized data flow kernels in a spatial programming language for a Maxeler FPGA platform; in the alternative design approach, we target a vector coprocessor with large vector lengths, which is implemented as a form of programmable overlay on the application FPGAs of a Convey HC-1. We assess both approaches in terms of overall system performance, raw kernel performance, and performance relative to invested resources. After compensating for the effects of the underlying hardware platforms, the specialized dataflow kernels on the Maxeler platform are around 3x faster than kernels executing on the Convey vector coprocessor. In our concrete scenario, due to trade-offs between reconfiguration overheads and exposed parallelism, the advantage of specialized dataflow kernels is reduced to around 2.5x.}},
  author       = {{Kenter, Tobias and Schmitz, Henning and Plessl, Christian}},
  journal      = {{International Journal of Reconfigurable Computing (IJRC)}},
  publisher    = {{Hindawi}},
  title        = {{{Exploring Tradeoffs between Specialized Kernels and a Reusable Overlay in a Stereo-Matching Case Study}}},
  doi          = {{10.1155/2015/859425}},
  volume       = {{2015}},
  year         = {{2015}},
}

% Workshop paper. Fixed: the abstract was garbled by a PDF/line-break extraction
% that glued words together across line boundaries ("mechanismwhich",
% "existentsoftware", "XeonPhi", "onstack-replacement", ...). Reconstructed the
% running text; wording otherwise unchanged.
@inproceedings{303,
  abstract     = {{This paper introduces Binary Acceleration At Runtime (BAAR), an easy-to-use on-the-fly binary acceleration mechanism which aims to tackle the problem of enabling existent software to automatically utilize accelerators at runtime. BAAR is based on the LLVM Compiler Infrastructure and has a client-server architecture. The client runs the program to be accelerated in an environment which allows program analysis and profiling. Program parts which are identified as suitable for the available accelerator are exported and sent to the server. The server optimizes these program parts for the accelerator and provides RPC execution for the client. The client transforms its program to utilize accelerated execution on the server for offloaded program parts. We evaluate our work with a proof-of-concept implementation of BAAR that uses an Intel Xeon Phi 5110P as the acceleration target and performs automatic offloading, parallelization and vectorization of suitable program parts. The practicality of BAAR for real-world examples is shown based on a study of stencil codes. Our results show a speedup of up to 4 without any developer-provided hints and 5.77 with hints over the same code compiled with the Intel Compiler at optimization level O2 and running on an Intel Xeon E5-2670 machine. Based on our insights gained during implementation and evaluation we outline future directions of research, e.g., offloading more fine-granular program parts than functions, a more sophisticated communication mechanism or introducing on-stack-replacement.}},
  author       = {{Damschen, Marvin and Plessl, Christian}},
  booktitle    = {{Proceedings of the 5th International Workshop on Adaptive Self-tuning Computing Systems (ADAPT)}},
  title        = {{{Easy-to-Use On-The-Fly Binary Program Acceleration on Many-Cores}}},
  year         = {{2015}},
}

% DEBS 2015 conference paper. Fixed: two mangled author names where the first
% initial had been split off as the given name -- "T. Anderson, J." -> "Anderson, J. T."
% and "P. Schreuder, F." -> "Schreuder, F. P." (consistent with "Anderson, J" /
% "Schreuder, F" in entry 1775 below); Unicode en-dash in the title -> "--";
% Unicode o-umlaut -> {\"o} for classic-BibTeX compatibility.
@inproceedings{1773,
  author       = {{Schumacher, J{\"o}rn and Anderson, J. T. and Borga, A. and Boterenbrood, H. and Chen, H. and Chen, K. and Drake, G. and Francis, D. and Gorini, B. and Lehmann-Miotto, Giovanna and Lanni, F. and Levinson, L. and Narevicius, J. and Plessl, Christian and Roich, A. and Ryu, S. and Schreuder, F. P. and Vandelli, Wainer and Vermeulen, J. and Zhang, J.}},
  booktitle    = {{Proc. Int. Conf. on Distributed Event-Based Systems (DEBS)}},
  publisher    = {{ACM}},
  title        = {{{Improving Packet Processing Performance in the ATLAS FELIX Project -- Analysis and Optimization of a Memory-Bounded Algorithm}}},
  doi          = {{10.1145/2675743.2771824}},
  year         = {{2015}},
}

% German-language magazine column ("Aktuelles Schlagwort") in Informatik Spektrum.
% Fields are complete for @article. NOTE(review): an issn and the volume number
% could be added from the publisher record -- optional.
@article{1768,
  author       = {{Plessl, Christian and Platzner, Marco and Schreier, Peter J.}},
  journal      = {{Informatik Spektrum}},
  keywords     = {{approximate computing, survey}},
  number       = {{5}},
  pages        = {{396--399}},
  publisher    = {{Springer}},
  title        = {{{Aktuelles Schlagwort: Approximate Computing}}},
  doi          = {{10.1007/s00287-015-0911-z}},
  year         = {{2015}},
}

% DATE 2015 conference paper. Fixed: two unescaped percent signs in the abstract
% ("40% to 10%") -- in LaTeX an unescaped % comments out the rest of the line,
% silently truncating the typeset abstract. Escaped as \%.
@inproceedings{238,
  abstract     = {{In this paper, we study how binary applications can be transparently accelerated with novel heterogeneous computing resources without requiring any manual porting or developer-provided hints. Our work is based on Binary Acceleration At Runtime (BAAR), our previously introduced binary acceleration mechanism that uses the LLVM Compiler Infrastructure. BAAR is designed as a client-server architecture. The client runs the program to be accelerated in an environment, which allows program analysis and profiling and identifies and extracts suitable program parts to be offloaded. The server compiles and optimizes these offloaded program parts for the accelerator and offers access to these functions to the client with a remote procedure call (RPC) interface. Our previous work proved the feasibility of our approach, but also showed that communication time and overheads limit the granularity of functions that can be meaningfully offloaded. In this work, we motivate the importance of a lightweight, high-performance communication between server and client and present a communication mechanism based on the Message Passing Interface (MPI). We evaluate our approach by using an Intel Xeon Phi 5110P as the acceleration target and show that the communication overhead can be reduced from 40\% to 10\%, thus enabling even small hotspots to benefit from offloading to an accelerator.}},
  author       = {{Damschen, Marvin and Riebler, Heinrich and Vaz, Gavin Francis and Plessl, Christian}},
  booktitle    = {{Proceedings of the 2015 Conference on Design, Automation and Test in Europe (DATE)}},
  pages        = {{1078--1083}},
  publisher    = {{EDA Consortium / IEEE}},
  title        = {{{Transparent offloading of computational hotspots from binary code to Xeon Phi}}},
  doi          = {{10.7873/DATE.2015.1124}},
  year         = {{2015}},
}

% Proceedings-style journal article (J. Phys.: Conf. Ser.). Author initials are
% given without periods, consistently within this entry, so left as-is.
% NOTE(review): the DOI path (.../664/8/082050) suggests issue number 8 and
% article id 082050 -- confirm against the publisher record before adding
% number/pages fields.
@article{1775,
  abstract     = {{The ATLAS experiment at CERN is planning full deployment of a new unified optical link technology for connecting detector front end electronics on the timescale of the LHC Run 4 (2025). It is estimated that roughly 8000 GBT (GigaBit Transceiver) links, with transfer rates up to 10.24 Gbps, will replace existing links used for readout, detector control and distribution of timing and trigger information. A new class of devices will be needed to interface many GBT links to the rest of the trigger, data-acquisition and detector control systems. In this paper FELIX (Front End LInk eXchange) is presented, a PC-based device to route data from and to multiple GBT links via a high-performance general purpose network capable of a total throughput up to O(20 Tbps). FELIX implies architectural changes to the ATLAS data acquisition system, such as the use of industry standard COTS components early in the DAQ chain. Additionally the design and implementation of a FELIX demonstration platform is presented and hardware and software aspects will be discussed.}},
  author       = {{Anderson, J and Borga, A and Boterenbrood, H and Chen, H and Chen, K and Drake, G and Francis, D and Gorini, B and Lanni, F and Lehmann Miotto, G and Levinson, L and Narevicius, J and Plessl, Christian and Roich, A and Ryu, S and Schreuder, F and Schumacher, Jörn and Vandelli, Wainer and Vermeulen, J and Zhang, J}},
  journal      = {{Journal of Physics: Conference Series}},
  publisher    = {{IOP Publishing}},
  title        = {{{FELIX: a High-Throughput Network Approach for Interfacing to Front End Electronics for ATLAS Upgrades}}},
  doi          = {{10.1088/1742-6596/664/8/082050}},
  volume       = {{664}},
  year         = {{2015}},
}

% PhD thesis. Fixed: @phdthesis requires the awarding institution in the
% "school" field; it was misfiled under "publisher" (which standard styles
% ignore for this entry type, dropping the institution from the output).
@phdthesis{60446,
  author       = {{Campen, Marcel}},
  school       = {{RWTH Aachen University, Germany}},
  title        = {{{Quad layouts: generation and optimization of conforming quadrilateral surface partitions}}},
  year         = {{2015}},
}

% SIGGRAPH Asia paper in ACM TOG. Fixed: the abstract contained raw JATS XML
% markup from an auto-export (<jats:p>, <jats:italic>) -- stripped the tags,
% rendered the emphasized word with \emph{}, restored the missing spaces the
% tags had swallowed, and replaced the raw Unicode degree sign with $90^\circ$.
@article{60442,
  abstract     = {{Global surface parametrization often requires the use of cuts or charts due to non-trivial topology. In recent years a focus has been on so-called \emph{seamless} parametrizations, where the transition functions across the cuts are rigid transformations with a rotation about some multiple of $90^\circ$. Of particular interest, e.g. for quadrilateral meshing, paneling, or texturing, are those instances where in addition the translational part of these transitions is integral (or more generally: quantized). We show that finding not even the optimal, but just an arbitrary valid quantization (one that does not imply parametric degeneracies), is a complex combinatorial problem. We present a novel method that allows us to solve it, i.e. to find valid as well as good quality quantizations. It is based on an original approach to quickly construct solutions to linear Diophantine equation systems, exploiting the specific geometric nature of the parametrization problem. We thereby largely outperform the state-of-the-art, sometimes by several orders of magnitude.}},
  author       = {{Campen, Marcel and Bommes, David and Kobbelt, Leif}},
  issn         = {{0730-0301}},
  journal      = {{ACM Transactions on Graphics}},
  number       = {{6}},
  pages        = {{1--12}},
  publisher    = {{Association for Computing Machinery (ACM)}},
  title        = {{{Quantized global parametrization}}},
  doi          = {{10.1145/2816795.2818140}},
  volume       = {{34}},
  year         = {{2015}},
}

% Journal article. Fixed: Unicode en-dash in the page range -> "--", and the
% journal title casing ("Experimental brain research" -> "Experimental Brain
% Research"; journal fields are not recased by standard styles, so the stored
% form is what gets printed). NOTE(review): issue number and doi could be added
% from the publisher record -- optional.
@article{52869,
  author       = {{Hansen, Eva and Grimme, Britta and Reimann, Hendrik and Schöner, Gregor}},
  journal      = {{Experimental Brain Research}},
  pages        = {{2555--2569}},
  publisher    = {{Springer}},
  title        = {{{Carry-over coarticulation in joint angles}}},
  volume       = {{233}},
  year         = {{2015}},
}

% CloudCom 2014 conference paper. Fields are complete for @inproceedings.
% NOTE(review): doi and location could be added from the IEEE record -- optional.
@inproceedings{25112,
  abstract     = {{Automatically composing service-based software solutions is a challenging task. Considering context information during this service composition process is even more challenging. In domains such as image processing, however, context-sensitivity is inherent and cannot be ignored when developing techniques for automatic service composition. Formal approaches tend to create ambiguous solutions, whenever the expressive power of the applied formalism is limited. For example, services may have the same formal specification, although their actual functionality depends on the concrete context. In order to satisfy individual user requests while providing data-dependent functionality, formal approaches have to be extended. We propose to incorporate Reinforcement Learning techniques and combine them with planning based composition approaches. While planning ensures formally correct solutions, learning enables the composition process to resolve ambiguity by implicitly considering context information. Preliminary results show that our combined approach adapts to a static context while still satisfying formally specified requirements.}},
  author       = {{Jungmann, Alexander and Kleinjohann, Bernd}},
  booktitle    = {{Proceedings of the 6th IEEE International Conference on Cloud Computing Technology and Science (CloudCom)}},
  pages        = {{755--758}},
  publisher    = {{IEEE}},
  title        = {{{Towards Context-Sensitive Service Composition for Service-Oriented Image Processing}}},
  year         = {{2014}},
}

% Journal article. Fixed: the volume/issue designation "7(3&4)" was stuffed
% into the journal name (with an unescaped &); moved it into proper volume and
% number fields and escaped the ampersand in the combined-issue number.
@article{25114,
  abstract     = {{One goal of service-oriented computing is to realize future markets of composed services. In such markets, service providers offer services that can be flexibly combined with each other. However, although crucial for decision-making, market participants are usually not able to individually estimate the quality of traded services in advance. To overcome this problem, we present a conceptual design for a reputation system that collects and processes user feedback on transactions, and provides this information as a signal for quality to participants in the market. Based on our proposed concept, we describe the incorporation of reputation information into distinct decisionmaking processes that are crucial in such service markets. In this context, we present a fuzzy service matching approach that takes reputation information into account. Furthermore, we introduce an adaptive service composition approach, and investigate the impact of exchanging immediate user feedback by reputation information. Last but not least, we describe the importance of reputation information for economic decisions of different market participants. The overall output of this paper is a comprehensive view on managing and exploiting reputation information in markets of composed services using the example of On-The-Fly Computing.}},
  author       = {{Jungmann, Alexander and Brangewitz, Sonja and Petrlic, Ronald and Platenius, Marie Christin}},
  journal      = {{International Journal on Advances in Intelligent Systems}},
  number       = {{3\&4}},
  pages        = {{572--594}},
  title        = {{{Incorporating Reputation Information into Decision-Making Processes in Markets of Composed Services}}},
  volume       = {{7}},
  year         = {{2014}},
}

% SOCA 2014 conference paper. Fields are complete for @inproceedings.
% NOTE(review): doi and location could be added from the IEEE record -- optional.
@inproceedings{25115,
  abstract     = {{Automatically composing service-based software solutions is still a challenging task. Functional as well as nonfunctional properties have to be considered in order to satisfy individual user requests. Regarding non-functional properties, the composition process can be modeled as optimization problem and solved accordingly. Functional properties, in turn, can be described by means of a formal specification language. Statespace based planning approaches can then be applied to solve the underlying composition problem. However, depending on the expressiveness of the applied formalism and the completeness of the functional descriptions, formally equivalent services may still differ with respect to their implemented functionality. As a consequence, the most appropriate solution for a desired functionality can hardly be determined without considering additional information. In this paper, we demonstrate how to overcome this lack of information by means of Reinforcement Learning. In order to resolve ambiguity, we expand state-space based service composition by a recommendation mechanism that supports decision-making beyond formal specifications. The recommendation mechanism adjusts its recommendation strategy based on feedback from previous composition runs. Image processing serves as case study. Experimental results show the benefit of our proposed solution.}},
  author       = {{Jungmann, Alexander and Mohr, Felix and Kleinjohann, Bernd}},
  booktitle    = {{Proceedings of the 7th International Conference on Service Oriented Computing and Applications (SOCA)}},
  pages        = {{105--112}},
  publisher    = {{IEEE Computer Society}},
  title        = {{{Applying Reinforcement Learning for Resolving Ambiguity in Service Composition}}},
  year         = {{2014}},
}

% Conference paper. Fixed: this was recorded as @article with the conference
% name ("Forum on Specification & Design Languages (FDL 2014)") in the journal
% field; converted to @inproceedings with a booktitle, and escaped the
% ampersand. Citation key unchanged, so existing \cite commands still work.
@inproceedings{25116,
  author       = {{Becker, Markus and Kuznik, Christoph}},
  booktitle    = {{Forum on Specification \& Design Languages (FDL 2014)}},
  title        = {{{Fast Many-Worlds Simulation to Resolve Nondeterminism of Fault Effect Propagation}}},
  year         = {{2014}},
}

% ICISIP 2014 conference paper. Fixed: stray leading space inside the publisher
% value (would print an indented publisher name), stray trailing newline inside
% the abstract braces, and a small grammar slip in the abstract ("a disaster
% areas" -> "disaster areas").
@inproceedings{25119,
  abstract     = {{In case of a disaster it is of utmost importance to obtain an overview of the affected area in order to coordinate rescue operations. Recent research and development has led to affordable unmanned aerial vehicles (UAVs) and enabled (semi-)autonomous surveillance and mapping of disaster areas by UAVs. Mapping an area from many images incorporates image registration. Commonly used registration techniques expect static images to be registered. However, while mapping a disaster area, it is very likely that objects like vehicles or persons are moving on the ground, thus leading to dynamic scenes. We formerly introduced a fast and robust approach to register large amounts of successively arriving images, e.g., from UAVs. The approach only uses rigid-body transformations, but the usage of virtual forces enables the registration to tolerate small perspective distortions. This paper investigates the performance of our approach when applied to dynamic scenes, which are represented by nonlinear disturbances within image correspondences. We show that our approach not only tolerates small perspective distortions but also is able to register dynamic scenes.}},
  author       = {{Stern, Claudius and Kleinjohann, Lisa}},
  booktitle    = {{Proceedings of The 2nd International Conference on Intelligent Systems and Image Processing 2014}},
  location     = {{Kitakyushu, Japan, 26. - 29. Sep. 2014}},
  pages        = {{209--215}},
  publisher    = {{Institute of Industrial Applications Engineers}},
  title        = {{{Evaluating Influence of Nonlinear Disturbances on Image Registration Based on Virtual Forces}}},
  year         = {{2014}},
}

% ETFA 2014 conference paper. Minimal but valid @inproceedings entry.
% NOTE(review): pages, doi, and publisher are missing; ETFA proceedings are
% published by IEEE -- confirm against the IEEE record before adding.
@inproceedings{25122,
  author       = {{Grösbrink, Stefan and Almeida, Luis}},
  booktitle    = {{19th IEEE International Conference on Emerging Technologies and Factory Automation (ETFA)}},
  title        = {{{A Criticality-aware Mapping of Real-time Virtual Machines to Multi-core Processors}}},
  year         = {{2014}},
}

% DSD 2014 conference paper. Minimal but valid @inproceedings entry.
% NOTE(review): pages, doi, and publisher are missing; DSD proceedings are
% published by IEEE -- confirm against the IEEE record before adding.
@inproceedings{25145,
  author       = {{Becker, Markus and Kuznik, Christoph and Müller, Wolfgang}},
  booktitle    = {{17th Euromicro Conference on Digital Systems Design (DSD)}},
  title        = {{{Virtual Platforms for Model-Based Design of Dependable Cyber-Physical System Software}}},
  year         = {{2014}},
}

