@inproceedings{47239,
  author       = {{Dechand, Sergej and Schürmann, Dominik and Busse, Karoline and Acar, Yasemin and Fahl, Sascha and Smith, Matthew}},
  booktitle    = {{25th USENIX Security Symposium, USENIX Security 16, Austin, TX, USA, August 10-12, 2016}},
  editor       = {{Holz, Thorsten and Savage, Stefan}},
  pages        = {{193--208}},
  publisher    = {{USENIX Association}},
  title        = {{{An Empirical Study of Textual Key-Fingerprint Representations}}},
  year         = {{2016}},
}

@inproceedings{47241,
  author       = {{Acar, Yasemin and Backes, Michael and Fahl, Sascha and Kim, Doowon and Mazurek, Michelle L. and Stransky, Christian}},
  booktitle    = {{2016 IEEE Symposium on Security and Privacy (SP)}},
  publisher    = {{IEEE}},
  title        = {{{You Get Where You're Looking for: The Impact of Information Sources on Code Security}}},
  doi          = {{10.1109/sp.2016.25}},
  year         = {{2016}},
}

@inproceedings{46365,
  abstract     = {{Despite the intrinsic hardness of the Traveling Salesperson Problem (TSP) heuristic solvers, e.g., LKH+restart and EAX+restart, are remarkably successful in generating satisfactory or even optimal solutions. However, the reasons for their success are not yet fully understood. Recent approaches take an analytical viewpoint and try to identify instance features, which make an instance hard or easy to solve. We contribute to this area by generating instance sets for couples of TSP algorithms A and B by maximizing/minimizing their performance difference in order to generate instances which are easier to solve for one solver and much harder to solve for the other. This instance set offers the potential to identify key features which allow to distinguish between the problem hardness classes of both algorithms.}},
  author       = {{Bossek, Jakob and Trautmann, Heike}},
  booktitle    = {{Learning and Intelligent Optimization}},
  editor       = {{Festa, P and Sellmann, M and Vanschoren, J}},
  isbn         = {{978-3-319-50348-6}},
  pages        = {{48--59}},
  publisher    = {{Springer International Publishing}},
  title        = {{{Evolving Instances for Maximizing Performance Differences of State-of-the-Art Inexact TSP Solvers}}},
  doi          = {{10.1007/978-3-319-50349-3_4}},
  volume       = {{10079}},
  year         = {{2016}},
}

@inproceedings{46366,
  abstract     = {{State of the Art inexact solvers of the NP-hard Traveling Salesperson Problem (TSP) are known to mostly yield high-quality solutions in reasonable computation times. With the purpose of understanding different levels of instance difficulties, instances for the current State of the Art heuristic TSP solvers LKH+restart and EAX+restart are presented which are evolved using a sophisticated evolutionary algorithm. More specifically, the performance differences of the respective solvers are maximized resulting in instances which are easier to solve for one solver and much more difficult for the other. Focusing on both optimization directions, instance features are identified which characterize both types of instances and increase the understanding of solver performance differences.}},
  author       = {{Bossek, Jakob and Trautmann, Heike}},
  booktitle    = {{AI*IA 2016 Advances in Artificial Intelligence}},
  editor       = {{Adorni, G and Cagnoni, S and Gori, M and Maratea, M}},
  isbn         = {{978-3-319-49129-5}},
  pages        = {{3--12}},
  publisher    = {{Springer}},
  title        = {{{Understanding Characteristics of Evolved Instances for State-of-the-Art Inexact TSP Solvers with Maximum Performance Difference}}},
  doi          = {{10.1007/978-3-319-49130-1_1}},
  volume       = {{10037}},
  year         = {{2016}},
}

@inproceedings{8159,
  abstract     = {{The Boolean constraint satisfaction problem 3-SAT is arguably the canonical NP-complete problem. In contrast, 2-SAT can not only be decided in polynomial time, but in fact in deterministic linear time. In 2006, Bravyi proposed a physically motivated generalization of k-SAT to the quantum setting, defining the problem "quantum k-SAT". He showed that quantum 2-SAT is also solvable in polynomial time on a classical computer, in particular in deterministic time O(n^4), assuming unit-cost arithmetic over a field extension of the rational numbers, where n is number of variables. In this paper, we present an algorithm for quantum 2-SAT which runs in linear time, i.e. deterministic time O(n+m) for n and m the number of variables and clauses, respectively. Our approach exploits the transfer matrix techniques of Laumann et al. [QIC, 2010] used in the study of phase transitions for random quantum 2-SAT, and bears similarities with both the linear time 2-SAT algorithms of Even, Itai, and Shamir (based on backtracking) [SICOMP, 1976] and Aspvall, Plass, and Tarjan (based on strongly connected components) [IPL, 1979].}},
  author       = {{de Beaudrap, Niel and Gharibian, Sevag}},
  booktitle    = {{Proceedings of the 31st Conference on Computational Complexity (CCC 2016)}},
  editor       = {{Raz, Ran}},
  isbn         = {{978-3-95977-008-8}},
  keywords     = {{quantum 2-SAT, transfer matrix, strongly connected components, limited backtracking, local Hamiltonian}},
  location     = {{Tokyo, Japan}},
  pages        = {{27:1--27:21}},
  publisher    = {{Schloss Dagstuhl - Leibniz-Zentrum fuer Informatik}},
  title        = {{{A Linear Time Algorithm for Quantum 2-SAT}}},
  doi          = {{10.4230/LIPIcs.CCC.2016.27}},
  volume       = {{50}},
  year         = {{2016}},
}

@inbook{29,
  abstract     = {{In this chapter, we present an introduction to the ReconOS operating system for reconfigurable computing. ReconOS offers a unified multi-threaded programming model and operating system services for threads executing in software and threads mapped to reconfigurable hardware. By supporting standard POSIX operating system functions for both software and hardware threads, ReconOS particularly caters to developers with a software background, because developers can use well-known mechanisms such as semaphores, mutexes, condition variables, and message queues for developing hybrid applications with threads running on the CPU and FPGA concurrently. Through the semantic integration of hardware accelerators into a standard operating system environment, ReconOS allows for rapid design space exploration, supports a structured application development process and improves the portability of applications between different reconfigurable computing systems.}},
  author       = {{Agne, Andreas and Platzner, Marco and Plessl, Christian and Happe, Markus and Lübbers, Enno}},
  booktitle    = {{FPGAs for Software Programmers}},
  editor       = {{Koch, Dirk and Hannig, Frank and Ziener, Daniel}},
  isbn         = {{978-3-319-26406-6}},
  pages        = {{227--244}},
  publisher    = {{Springer International Publishing}},
  title        = {{{ReconOS}}},
  doi          = {{10.1007/978-3-319-26408-0_13}},
  year         = {{2016}},
}

@inproceedings{31,
  author       = {{Riebler, Heinrich and Vaz, Gavin Francis and Plessl, Christian and Trainiti, Ettore M. G. and Durelli, Gianluca C. and Bolchini, Cristiana}},
  booktitle    = {{Proc. HiPEAC Workshop on Reconfigurable Computing (WRC)}},
  title        = {{{Using Just-in-Time Code Generation for Transparent Resource Management in Heterogeneous Systems}}},
  year         = {{2016}},
}

@inproceedings{24,
  author       = {{Kenter, Tobias and Plessl, Christian}},
  booktitle    = {{Proc. Workshop on Heterogeneous High-performance Reconfigurable Computing (H2RC)}},
  title        = {{{Microdisk Cavity FDTD Simulation on FPGA using OpenCL}}},
  year         = {{2016}},
}

@inproceedings{25,
  author       = {{Lass, Michael and Kühne, Thomas and Plessl, Christian}},
  booktitle    = {{Workshop on Approximate Computing (AC)}},
  title        = {{{Using Approximate Computing in Scientific Codes}}},
  year         = {{2016}},
}

@inproceedings{138,
  abstract     = {{Hardware accelerators are becoming popular in academia and industry. To move one step further from the state-of-the-art multicore plus accelerator approaches, we present in this paper our innovative SAVEHSA architecture. It comprises of a heterogeneous hardware platform with three different high-end accelerators attached over PCIe (GPGPU, FPGA and Intel MIC). Such systems can process parallel workloads very efficiently whilst being more energy efficient than regular CPU systems. To leverage the heterogeneity, the workload has to be distributed among the computing units in a way that each unit is well-suited for the assigned task and executable code must be available. To tackle this problem we present two software components; the first can perform resource allocation at runtime while respecting system and application goals (in terms of throughput, energy, latency, etc.) and the second is able to analyze an application and generate executable code for an accelerator at runtime. We demonstrate the first proof-of-concept implementation of our framework on the heterogeneous platform, discuss different runtime policies and measure the introduced overheads.}},
  author       = {{Riebler, Heinrich and Vaz, Gavin Francis and Plessl, Christian and Trainiti, Ettore M. G. and Durelli, Gianluca C. and Del Sozzo, Emanuele and Santambrogio, Marco D. and Bolchini, Cristiana}},
  booktitle    = {{Proceedings of International Forum on Research and Technologies for Society and Industry (RTSI)}},
  pages        = {{1--5}},
  publisher    = {{IEEE}},
  title        = {{{Using Just-in-Time Code Generation for Transparent Resource Management in Heterogeneous Systems}}},
  doi          = {{10.1109/RTSI.2016.7740545}},
  year         = {{2016}},
}

@inbook{156,
  abstract     = {{Many modern compute nodes are heterogeneous multi-cores that integrate several CPU cores with fixed function or reconfigurable hardware cores. Such systems need to adapt task scheduling and mapping to optimise for performance and energy under varying workloads and, increasingly important, for thermal and fault management and are thus relevant targets for self-aware computing. In this chapter, we take up the generic reference architecture for designing self-aware and self-expressive computing systems and refine it for heterogeneous multi-cores. We present ReconOS, an architecture, programming model and execution environment for heterogeneous multi-cores, and show how the components of the reference architecture can be implemented on top of ReconOS. In particular, the unique feature of dynamic partial reconfiguration supports self-expression through starting and terminating reconfigurable hardware cores. We detail a case study that runs two applications on an architecture with one CPU and 12 reconfigurable hardware cores and present self-expression strategies for adapting under performance, temperature and even conflicting constraints. The case study demonstrates that the reference architecture as a model for self-aware computing is highly useful as it allows us to structure and simplify the design process, which will be essential for designing complex future compute nodes. Furthermore, ReconOS is used as a base technology for flexible protocol stacks in Chapter 10, an approach for self-aware computing at the networking level.}},
  author       = {{Agne, Andreas and Happe, Markus and Lösch, Achim and Plessl, Christian and Platzner, Marco}},
  booktitle    = {{Self-aware Computing Systems}},
  pages        = {{145--165}},
  publisher    = {{Springer International Publishing}},
  title        = {{{Self-aware Compute Nodes}}},
  doi          = {{10.1007/978-3-319-39675-0_8}},
  year         = {{2016}},
}

@article{165,
  abstract     = {{A broad spectrum of applications can be accelerated by offloading computation intensive parts to reconfigurable hardware. However, to achieve speedups, the number of loop iterations (trip count) needs to be sufficiently large to amortize offloading overheads. Trip counts are frequently not known at compile time, but only at runtime just before entering a loop. Therefore, we propose to generate code for both the CPU and the coprocessor, and defer the offloading decision to the application runtime. We demonstrate how a toolflow, based on the LLVM compiler framework, can automatically embed dynamic offloading decisions into the application code. We perform in-depth static and dynamic analysis of popular benchmarks, which confirm the general potential of such an approach. We also propose to optimize the offloading process by decoupling the runtime decision from the loop execution (decision slack). The feasibility of our approach is demonstrated by a toolflow that automatically identifies suitable data-parallel loops and generates code for the FPGA coprocessor of a Convey HC-1. We evaluate the integrated toolflow with representative loops executed for different input data sizes.}},
  author       = {{Vaz, Gavin Francis and Riebler, Heinrich and Kenter, Tobias and Plessl, Christian}},
  issn         = {{0045-7906}},
  journal      = {{Computers and Electrical Engineering}},
  pages        = {{91--111}},
  publisher    = {{Elsevier}},
  title        = {{{Potential and Methods for Embedding Dynamic Offloading Decisions into Application Code}}},
  doi          = {{10.1016/j.compeleceng.2016.04.021}},
  volume       = {{55}},
  year         = {{2016}},
}

@inproceedings{168,
  abstract     = {{The use of heterogeneous computing resources, such as Graphic Processing Units or other specialized coprocessors, has become widespread in recent years because of their performance and energy efficiency advantages. Approaches for managing and scheduling tasks to heterogeneous resources are still subject to research. Although queuing systems have recently been extended to support accelerator resources, a general solution that manages heterogeneous resources at the operating system-level to exploit a global view of the system state is still missing. In this paper we present a user space scheduler that enables task scheduling and migration on heterogeneous processing resources in Linux. Using run queues for available resources we perform scheduling decisions based on the system state and on task characterization from earlier measurements. With a programming pattern that supports the integration of checkpoints into applications, we preempt tasks and migrate them between three very different compute resources. Considering static and dynamic workload scenarios, we show that this approach can gain up to 17% performance, on average 7%, by effectively avoiding idle resources. We demonstrate that a work-conserving strategy without migration is no suitable alternative.}},
  author       = {{Lösch, Achim and Beisel, Tobias and Kenter, Tobias and Plessl, Christian and Platzner, Marco}},
  booktitle    = {{Proceedings of the 2016 Design, Automation & Test in Europe Conference & Exhibition (DATE)}},
  pages        = {{912--917}},
  publisher    = {{EDA Consortium / IEEE}},
  title        = {{{Performance-centric scheduling with task migration for a heterogeneous compute node in the data center}}},
  year         = {{2016}},
}

@inproceedings{171,
  author       = {{Kenter, Tobias and Vaz, Gavin Francis and Riebler, Heinrich and Plessl, Christian}},
  booktitle    = {{Workshop on Reconfigurable Computing (WRC)}},
  title        = {{{Opportunities for deferring application partitioning and accelerator synthesis to runtime (extended abstract)}}},
  year         = {{2016}},
}

@article{11989,
  author       = {{Campolo, Claudia and Cheng, Lin and Sommer, Christoph and Tsai, Hsin-Mu}},
  issn         = {{0140-3664}},
  journal      = {{Computer Communications}},
  pages        = {{1--2}},
  title        = {{{Special Issue on Multi-radio, Multi-technology, Multi-system Vehicular Communications}}},
  doi          = {{10.1016/j.comcom.2016.09.003}},
  year         = {{2016}},
}

@article{60437,
  abstract     = {{Parametrization based methods have recently become very popular for the generation of high quality quad meshes. In contrast to previous approaches, they allow for intuitive user control in order to accommodate all kinds of application driven constraints and design intentions. A major obstacle in practice, however, are the relatively long computations that lead to response times of several minutes already for input models of moderate complexity. In this paper we introduce a novel strategy to handle highly complex input meshes with up to several millions of triangles such that quad meshes can still be created and edited within an interactive workflow. Our method is based on representing the input model on different levels of resolution with a mechanism to propagate parametrizations from coarser to finer levels. The major challenge is to guarantee consistent parametrizations even in the presence of charts, transition functions, and singularities. Moreover, the remaining degrees of freedom on coarser levels of resolution have to be chosen carefully in order to still achieve low distortion parametrizations. We demonstrate a prototypic system where the user can interactively edit quad meshes with powerful high-level operations such as guiding constraints, singularity repositioning, and singularity connections.}},
  author       = {{Ebke, Hans-Christian and Schmidt, Patrick and Campen, Marcel and Kobbelt, Leif}},
  issn         = {{0730-0301}},
  journal      = {{ACM Transactions on Graphics}},
  number       = {{6}},
  pages        = {{1--13}},
  publisher    = {{Association for Computing Machinery (ACM)}},
  title        = {{{Interactively controlled quad remeshing of high resolution 3D models}}},
  doi          = {{10.1145/2980179.2982413}},
  volume       = {{35}},
  year         = {{2016}},
}

@article{60435,
  abstract     = {{Various applications of global surface parametrization benefit from the alignment of parametrization isolines with principal curvature directions. This is particularly true for recent parametrization-based meshing approaches, where this directly translates into a shape-aware edge flow, better approximation quality, and reduced meshing artifacts. Existing methods to influence a parametrization based on principal curvature directions suffer from scale-dependence, which implies the necessity of parameter variation, or try to capture complex directional shape features using simple 1D curves. Especially for non-sharp features, such as chamfers, fillets, blends, and even more for organic variants thereof, these abstractions can be unfit. We present a novel approach which respects and exploits the 2D nature of such directional feature regions, detects them based on coherence and homogeneity properties, and controls the parametrization process accordingly. This approach enables us to provide an intuitive, scale-invariant control parameter to the user. It also allows us to consider non-local aspects like the topology of a feature, enabling further improvements. We demonstrate that, compared to previous approaches, global parametrizations of higher quality can be generated without user intervention.}},
  author       = {{Campen, Marcel and Ibing, Moritz and Ebke, Hans-Christian and Zorin, Denis and Kobbelt, Leif}},
  issn         = {{0167-7055}},
  journal      = {{Computer Graphics Forum}},
  number       = {{5}},
  pages        = {{1--10}},
  publisher    = {{Wiley}},
  title        = {{{Scale-Invariant Directional Alignment of Surface Parametrizations}}},
  doi          = {{10.1111/cgf.12958}},
  volume       = {{35}},
  year         = {{2016}},
}

@article{60434,
  abstract     = {{Direction fields and vector fields play an increasingly important role in computer graphics and geometry processing. The synthesis of directional fields on surfaces, or other spatial domains, is a fundamental step in numerous applications, such as mesh generation, deformation, texture mapping, and many more. The wide range of applications resulted in definitions for many types of directional fields: from vector and tensor fields, over line and cross fields, to frame and vector-set fields. Depending on the application at hand, researchers have used various notions of objectives and constraints to synthesize such fields. These notions are defined in terms of fairness, feature alignment, symmetry, or field topology, to mention just a few. To facilitate these objectives, various representations, discretizations, and optimization strategies have been developed. These choices come with varying strengths and weaknesses. This report provides a systematic overview of directional field synthesis for graphics applications, the challenges it poses, and the methods developed in recent years to address these challenges.}},
  author       = {{Vaxman, Amir and Campen, Marcel and Diamanti, Olga and Panozzo, Daniele and Bommes, David and Hildebrandt, Klaus and Ben-Chen, Mirela}},
  issn         = {{0167-7055}},
  journal      = {{Computer Graphics Forum}},
  number       = {{2}},
  pages        = {{545--572}},
  publisher    = {{Wiley}},
  title        = {{{Directional Field Synthesis, Design, and Processing}}},
  doi          = {{10.1111/cgf.12864}},
  volume       = {{35}},
  year         = {{2016}},
}

@article{60436,
  abstract     = {{This paper presents a method for bijective parametrization of 2D and 3D objects over canonical domains. While a range of solutions for the two-dimensional case are well-known, our method guarantees bijectivity of mappings also for a large, combinatorially-defined class of tetrahedral meshes (shellable meshes). The key concept in our method is the piecewise-linear (PL) foliation, decomposing the mesh into one-dimensional submanifolds and reducing the mapping problem to parametrization of a lower-dimensional manifold (a foliation section). The maps resulting from these foliations are proved to be bijective and continuous, and shown to have provably bijective PL approximations. We describe exact, numerically robust evaluation methods and demonstrate our implementation's capabilities on a large variety of meshes.}},
  author       = {{Campen, Marcel and Silva, Cláudio T. and Zorin, Denis}},
  issn         = {{0730-0301}},
  journal      = {{ACM Transactions on Graphics}},
  number       = {{4}},
  pages        = {{1--15}},
  publisher    = {{Association for Computing Machinery (ACM)}},
  title        = {{{Bijective maps from simplicial foliations}}},
  doi          = {{10.1145/2897824.2925890}},
  volume       = {{35}},
  year         = {{2016}},
}

@article{52870,
  author       = {{Raket, Lars Lau and Grimme, Britta and Schöner, Gregor and Igel, Christian and Markussen, Bo}},
  journal      = {{PLoS Computational Biology}},
  number       = {{9}},
  pages        = {{e1005092}},
  publisher    = {{Public Library of Science}},
  title        = {{{Separating timing, movement conditions and individual differences in the analysis of human movement}}},
  volume       = {{12}},
  year         = {{2016}},
}

