@article{20, abstract = {{Approximate computing has been shown to provide new ways to improve performance and power consumption of error-resilient applications. While many of these applications can be found in image processing, data classification or machine learning, we demonstrate its suitability to a problem from scientific computing. Utilizing the self-correcting behavior of iterative algorithms, we show that approximate computing can be applied to the calculation of inverse matrix p-th roots, which are required in many applications in scientific computing. Results show great opportunities to reduce the computational effort and bandwidth required for the execution of the discussed algorithm, especially when targeting special accelerator hardware.}}, author = {{Lass, Michael and Kühne, Thomas and Plessl, Christian}}, issn = {{1943-0671}}, journal = {{Embedded Systems Letters}}, number = {{2}}, pages = {{33--36}}, publisher = {{IEEE}}, title = {{{Using Approximate Computing for the Calculation of Inverse Matrix p-th Roots}}}, doi = {{10.1109/LES.2017.2760923}}, volume = {{10}}, year = {{2018}}, } @inproceedings{22, abstract = {{This paper describes a data structure and a heuristic to plan and map arbitrary resources in complex combinations while applying time-dependent constraints. The approach is used in the planning-based workload manager OpenCCS at the Paderborn Center for Parallel Computing (PC\(^2\)) to operate heterogeneous clusters with up to 10000 cores. We also show performance results derived from four years of operation.}}, author = {{Keller, Axel}}, booktitle = {{Proc. Workshop on Job Scheduling Strategies for Parallel Processing (JSSPP)}}, editor = {{Klusáček, D. and Cirne, W. and Desai, N.}}, isbn = {{978-3-319-77398-8}}, keywords = {{Scheduling, Planning, Mapping, Workload management}}, location = {{Orlando, FL, USA}}, pages = {{132--151}}, publisher = {{Springer}}, title = {{{A Data Structure for Planning Based Workload Management of Heterogeneous HPC Systems}}}, doi = {{10.1007/978-3-319-77398-8_8}}, volume = {{10773}}, year = {{2018}}, } @article{6516, author = {{Mertens, Jan Cedric and Boschmann, Alexander and Schmidt, M. and Plessl, Christian}}, issn = {{1369-7072}}, journal = {{Sports Engineering}}, number = {{4}}, pages = {{441--451}}, publisher = {{Springer Nature}}, title = {{{Sprint diagnostic with GPS and inertial sensor fusion}}}, doi = {{10.1007/s12283-018-0291-0}}, volume = {{21}}, year = {{2018}}, } @article{13348, author = {{Luk, Samuel M. H. and Lewandowski, P. and Kwong, N. H. and Baudin, E. and Lafont, O. and Tignon, J. and Leung, P. T. and Chan, Ch. K. P. and Babilon, M. and Schumacher, Stefan and Binder, R.}}, issn = {{0740-3224}}, journal = {{Journal of the Optical Society of America B}}, number = {{1}}, title = {{{Theory of optically controlled anisotropic polariton transport in semiconductor double microcavities}}}, doi = {{10.1364/josab.35.000146}}, volume = {{35}}, year = {{2018}}, } @inproceedings{1588, abstract = {{The exploration of FPGAs as accelerators for scientific simulations has so far mostly been focused on small kernels of methods working on regular data structures, for example in the form of stencil computations for finite difference methods. In computational sciences, often more advanced methods are employed that promise better stability, convergence, locality and scaling. Unstructured meshes are shown to be more effective and more accurate, compared to regular grids, in representing computation domains of various shapes.
Using unstructured meshes, the discontinuous Galerkin method preserves the ability to perform explicit local update operations for simulations in the time domain. In this work, we investigate FPGAs as a target platform for an implementation of the nodal discontinuous Galerkin method to find time-domain solutions of Maxwell's equations in an unstructured mesh. When maximizing data reuse and fitting constant coefficients into suitably partitioned on-chip memory, high computational intensity allows us to implement and feed wide data paths with hundreds of floating point operators. By decoupling off-chip memory accesses from the computations, high memory bandwidth can be sustained, even for the irregular access pattern required by parts of the application. Using the Intel/Altera OpenCL SDK for FPGAs, we present different implementation variants for different polynomial orders of the method. In different phases of the algorithm, either computational or bandwidth limits of the Arria 10 platform are almost reached, thus outperforming a highly multithreaded CPU implementation by around 2x.}}, author = {{Kenter, Tobias and Mahale, Gopinath and Alhaddad, Samer and Grynko, Yevgen and Schmitt, Christian and Afzal, Ayesha and Hannig, Frank and Förstner, Jens and Plessl, Christian}}, booktitle = {{Proc. Int. Symp. on Field-Programmable Custom Computing Machines (FCCM)}}, keywords = {{tet_topic_hpc}}, publisher = {{IEEE}}, title = {{{OpenCL-based FPGA Design to Accelerate the Nodal Discontinuous Galerkin Method for Unstructured Meshes}}}, doi = {{10.1109/FCCM.2018.00037}}, year = {{2018}}, } @inproceedings{1590, abstract = {{We present the submatrix method, a highly parallelizable method for the approximate calculation of inverse p-th roots of large sparse symmetric matrices, which are required in different scientific applications. Following the idea of Approximate Computing, we allow imprecision in the final result in order to utilize the sparsity of the input matrix and to allow massively parallel execution. For an n x n matrix, the proposed algorithm allows distributing the calculations over n nodes with little communication overhead. The result matrix exhibits the same sparsity pattern as the input matrix, allowing for efficient reuse of allocated data structures. We evaluate the algorithm with respect to the error that it introduces into calculated results, as well as its performance and scalability. We demonstrate that the error is relatively limited for well-conditioned matrices and that results are still valuable for error-resilient applications like preconditioning even for ill-conditioned matrices. We discuss the execution time and scaling of the algorithm on a theoretical level and present a distributed implementation of the algorithm using MPI and OpenMP. We demonstrate the scalability of this implementation by running it on a high-performance compute cluster comprising 1024 CPU cores, showing a speedup of 665x compared to single-threaded execution.}}, author = {{Lass, Michael and Mohr, Stephan and Wiebeler, Hendrik and Kühne, Thomas and Plessl, Christian}}, booktitle = {{Proc.
Platform for Advanced Scientific Computing (PASC) Conference}}, isbn = {{978-1-4503-5891-0}}, keywords = {{approximate computing, linear algebra, matrix inversion, matrix p-th roots, numeric algorithm, parallel computing}}, location = {{Basel, Switzerland}}, publisher = {{ACM}}, title = {{{A Massively Parallel Algorithm for the Approximate Calculation of Inverse p-th Roots of Large Sparse Matrices}}}, doi = {{10.1145/3218176.3218231}}, year = {{2018}}, } @inproceedings{1204, author = {{Riebler, Heinrich and Vaz, Gavin Francis and Kenter, Tobias and Plessl, Christian}}, booktitle = {{Proc. ACM SIGPLAN Symposium on Principles and Practice of Parallel Programming (PPoPP)}}, isbn = {{9781450349826}}, keywords = {{htrop}}, publisher = {{ACM}}, title = {{{Automated Code Acceleration Targeting Heterogeneous OpenCL Devices}}}, doi = {{10.1145/3178487.3178534}}, year = {{2018}}, } @article{18, abstract = {{Branch and bound (B&B) algorithms structure the search space as a tree and eliminate infeasible solutions early by pruning subtrees that cannot lead to a valid or optimal solution. Custom hardware designs significantly accelerate the execution of these algorithms. In this article, we demonstrate a high-performance B&B implementation on FPGAs. First, we identify general elements of B&B algorithms and describe their implementation as a finite state machine. Then, we introduce workers that autonomously cooperate using work stealing to allow parallel execution and full utilization of the target FPGA. Finally, we explore advantages of instance-specific designs that target a specific problem instance to improve performance. We evaluate our concepts by applying them to a branch and bound problem, the reconstruction of corrupted AES keys obtained from cold-boot attacks. The evaluation shows that our work stealing approach is scalable with the available resources and provides speedups proportional to the number of workers. Instance-specific designs allow us to achieve an overall speedup of 47x compared to the fastest implementation of AES key reconstruction so far. Finally, we demonstrate how instance-specific designs can be generated just-in-time such that the provided speedups outweigh the additional time required for design synthesis.}}, author = {{Riebler, Heinrich and Lass, Michael and Mittendorf, Robert and Löcke, Thomas and Plessl, Christian}}, issn = {{1936-7406}}, journal = {{ACM Transactions on Reconfigurable Technology and Systems (TRETS)}}, keywords = {{coldboot}}, number = {{3}}, pages = {{24:1--24:23}}, publisher = {{Association for Computing Machinery (ACM)}}, title = {{{Efficient Branch and Bound on FPGAs Using Work Stealing and Instance-Specific Designs}}}, doi = {{10.1145/3053687}}, volume = {{10}}, year = {{2017}}, } @inproceedings{1592, abstract = {{Compared to classical HDL designs, generating FPGA designs with high-level synthesis from an OpenCL specification promises easier exploration of different design alternatives and, through ready-to-use infrastructure and common abstractions for host and memory interfaces, easier portability between different FPGA families. In this work, we evaluate the extent of this promise. To this end, we present a parameterized FDTD implementation for photonic microcavity simulations. Our design can trade off different forms of parallelism and works for two independent OpenCL-based FPGA design flows. Hence, we can target FPGAs from different vendors and different FPGA families.
We describe how we used pre-processor macros to achieve this flexibility and to work around different shortcomings of the current tools. Choosing the right design configurations, we are able to present two extremely competitive solutions for very different FPGA targets, reaching up to 172 GFLOPS sustained performance. With the portability and flexibility demonstrated, code developers not only avoid vendor lock-in, but can even make best use of real trade-offs between different architectures.}}, author = {{Kenter, Tobias and Förstner, Jens and Plessl, Christian}}, booktitle = {{Proc. Int. Conf. on Field Programmable Logic and Applications (FPL)}}, keywords = {{tet_topic_hpc}}, publisher = {{IEEE}}, title = {{{Flexible FPGA design for FDTD using OpenCL}}}, doi = {{10.23919/FPL.2017.8056844}}, year = {{2017}}, } @article{1589, author = {{Schumacher, Jörn and Plessl, Christian and Vandelli, Wainer}}, journal = {{Journal of Physics: Conference Series}}, publisher = {{IOP Publishing}}, title = {{{High-Throughput and Low-Latency Network Communication with NetIO}}}, doi = {{10.1088/1742-6596/898/8/082003}}, volume = {{898}}, year = {{2017}}, } @phdthesis{33, abstract = {{Lightweight materials play an ever-growing role in today's world. Saving on the mass of a machine will usually translate into a lower energy consumption. However, lightweight applications are prone to develop performance problems due to vibration induced by the operation of the machine. The Fraunhofer Institute for Manufacturing Technology and Advanced Materials in Dresden conducts research into the damping properties of composite materials. They are experimenting with hollow, particle-filled spheres embedded in the lightweight material. Such a system is the technical motivation of this thesis. Ultimately, a numerical experiment to derive the coefficient of restitution is required. The simulation developed in this thesis is based on a discrete element method to track the individual particle and sphere trajectories. Based on a potential-based approach for the particle interactions, as deployed in molecular dynamics, the behavior of the particles can be controlled effectively. The simulated volume uses reflecting boundaries and encloses the hollow sphere. In this work, a highly flexible memory structure was used with a linked-cell approach to cope with the highly flexible mass of particles. This allows for a linear complexity of the method with regard to the particle number by reducing the computational overhead of the interaction computation. Multiple numerical experiments show the great effect the particles have on the damping behavior of the system.}}, author = {{Steinle, Tobias}}, title = {{{Modeling and simulation of metallic, particle-damped spheres for lightweight materials}}}, year = {{2016}}, } @inproceedings{34, author = {{Dellnitz, Michael and Eckstein, Julian and Flaßkamp, Kathrin and Friedel, Patrick and Horenkamp, Christian and Köhler, Ulrich and Ober-Blöbaum, Sina and Peitz, Sebastian and Tiemeyer, Sebastian}}, booktitle = {{Progress in Industrial Mathematics at ECMI}}, issn = {{2212-0173}}, pages = {{633--641}}, publisher = {{Springer International Publishing}}, title = {{{Multiobjective Optimal Control Methods for the Development of an Intelligent Cruise Control}}}, doi = {{10.1007/978-3-319-23413-7_87}}, volume = {{22}}, year = {{2016}}, } @inproceedings{19, abstract = {{Version Control Systems (VCS) are a valuable tool for software development and document management.
Both client/server and distributed (Peer-to-Peer) models exist, with the latter (e.g., Git and Mercurial) becoming increasingly popular. Their distributed nature introduces complications, especially concerning security: it is hard to control the dissemination of contents stored in distributed VCS as they rely on replication of complete repositories to any involved user. We overcome this issue by designing and implementing a concept for cryptography-enforced access control which is transparent to the user. Use of field-tested schemes (end-to-end encryption, digital signatures) allows for strong security, while adoption of convergent encryption and content-defined chunking retains storage efficiency. The concept is seamlessly integrated into Mercurial---respecting its distributed storage concept---to ensure practical usability and compatibility with existing deployments.}}, author = {{Lass, Michael and Leibenger, Dominik and Sorge, Christoph}}, booktitle = {{Proc. 41st Conference on Local Computer Networks (LCN)}}, isbn = {{978-1-5090-2054-6}}, keywords = {{access control, distributed version control systems, mercurial, peer-to-peer, convergent encryption, confidentiality, authenticity}}, publisher = {{IEEE}}, title = {{{Confidentiality and Authenticity for Distributed Version Control Systems - A Mercurial Extension}}}, doi = {{10.1109/lcn.2016.11}}, year = {{2016}}, } @phdthesis{161, author = {{Kenter, Tobias}}, publisher = {{Universität Paderborn}}, title = {{{Reconfigurable Accelerators in the World of General-Purpose Computing}}}, year = {{2016}}, } @inbook{29, abstract = {{In this chapter, we present an introduction to the ReconOS operating system for reconfigurable computing. ReconOS offers a unified multi-threaded programming model and operating system services for threads executing in software and threads mapped to reconfigurable hardware. By supporting standard POSIX operating system functions for both software and hardware threads, ReconOS particularly caters to developers with a software background, because developers can use well-known mechanisms such as semaphores, mutexes, condition variables, and message queues for developing hybrid applications with threads running on the CPU and FPGA concurrently. Through the semantic integration of hardware accelerators into a standard operating system environment, ReconOS allows for rapid design space exploration, supports a structured application development process and improves the portability of applications between different reconfigurable computing systems.}}, author = {{Agne, Andreas and Platzner, Marco and Plessl, Christian and Happe, Markus and Lübbers, Enno}}, booktitle = {{FPGAs for Software Programmers}}, editor = {{Koch, Dirk and Hannig, Frank and Ziener, Daniel}}, isbn = {{978-3-319-26406-6}}, pages = {{227--244}}, publisher = {{Springer International Publishing}}, title = {{{ReconOS}}}, doi = {{10.1007/978-3-319-26408-0_13}}, year = {{2016}}, } @inproceedings{31, author = {{Riebler, Heinrich and Vaz, Gavin Francis and Plessl, Christian and Trainiti, Ettore M. G. and Durelli, Gianluca C. and Bolchini, Cristiana}}, booktitle = {{Proc. HiPEAC Workshop on Reconfigurable Computing (WRC)}}, title = {{{Using Just-in-Time Code Generation for Transparent Resource Management in Heterogeneous Systems}}}, year = {{2016}}, } @inproceedings{24, author = {{Kenter, Tobias and Plessl, Christian}}, booktitle = {{Proc.
Workshop on Heterogeneous High-performance Reconfigurable Computing (H2RC)}}, title = {{{Microdisk Cavity FDTD Simulation on FPGA using OpenCL}}}, year = {{2016}}, } @inproceedings{25, author = {{Lass, Michael and Kühne, Thomas and Plessl, Christian}}, booktitle = {{Workshop on Approximate Computing (AC)}}, title = {{{Using Approximate Computing in Scientific Codes}}}, year = {{2016}}, } @inproceedings{138, abstract = {{Hardware accelerators are becoming popular in academia and industry. To move one step beyond state-of-the-art multicore plus accelerator approaches, we present in this paper our innovative SAVEHSA architecture. It comprises a heterogeneous hardware platform with three different high-end accelerators attached over PCIe (GPGPU, FPGA and Intel MIC). Such systems can process parallel workloads very efficiently whilst being more energy efficient than regular CPU systems. To leverage the heterogeneity, the workload has to be distributed among the computing units in a way that each unit is well-suited for the assigned task, and executable code must be available. To tackle this problem we present two software components: the first can perform resource allocation at runtime while respecting system and application goals (in terms of throughput, energy, latency, etc.) and the second is able to analyze an application and generate executable code for an accelerator at runtime. We demonstrate the first proof-of-concept implementation of our framework on the heterogeneous platform, discuss different runtime policies and measure the introduced overheads.}}, author = {{Riebler, Heinrich and Vaz, Gavin Francis and Plessl, Christian and Trainiti, Ettore M. G. and Durelli, Gianluca C. and Del Sozzo, Emanuele and Santambrogio, Marco D. and Bolchini, Cristiana}}, booktitle = {{Proceedings of International Forum on Research and Technologies for Society and Industry (RTSI)}}, pages = {{1--5}}, publisher = {{IEEE}}, title = {{{Using Just-in-Time Code Generation for Transparent Resource Management in Heterogeneous Systems}}}, doi = {{10.1109/RTSI.2016.7740545}}, year = {{2016}}, } @inbook{156, abstract = {{Many modern compute nodes are heterogeneous multi-cores that integrate several CPU cores with fixed function or reconfigurable hardware cores. Such systems need to adapt task scheduling and mapping to optimise for performance and energy under varying workloads and, increasingly important, for thermal and fault management and are thus relevant targets for self-aware computing. In this chapter, we take up the generic reference architecture for designing self-aware and self-expressive computing systems and refine it for heterogeneous multi-cores. We present ReconOS, an architecture, programming model and execution environment for heterogeneous multi-cores, and show how the components of the reference architecture can be implemented on top of ReconOS. In particular, the unique feature of dynamic partial reconfiguration supports self-expression through starting and terminating reconfigurable hardware cores. We detail a case study that runs two applications on an architecture with one CPU and 12 reconfigurable hardware cores and present self-expression strategies for adapting under performance, temperature and even conflicting constraints. The case study demonstrates that the reference architecture as a model for self-aware computing is highly useful as it allows us to structure and simplify the design process, which will be essential for designing complex future compute nodes.
Furthermore, ReconOS is used as a base technology for flexible protocol stacks in Chapter 10, an approach for self-aware computing at the networking level.}}, author = {{Agne, Andreas and Happe, Markus and Lösch, Achim and Plessl, Christian and Platzner, Marco}}, booktitle = {{Self-aware Computing Systems}}, pages = {{145--165}}, publisher = {{Springer International Publishing}}, title = {{{Self-aware Compute Nodes}}}, doi = {{10.1007/978-3-319-39675-0_8}}, year = {{2016}}, } @article{165, abstract = {{A broad spectrum of applications can be accelerated by offloading computation-intensive parts to reconfigurable hardware. However, to achieve speedups, the number of loop iterations (trip count) needs to be sufficiently large to amortize offloading overheads. Trip counts are frequently not known at compile time, but only at runtime just before entering a loop. Therefore, we propose to generate code for both the CPU and the coprocessor, and defer the offloading decision to the application runtime. We demonstrate how a toolflow, based on the LLVM compiler framework, can automatically embed dynamic offloading decisions into the application code. We perform in-depth static and dynamic analysis of popular benchmarks, which confirm the general potential of such an approach. We also propose to optimize the offloading process by decoupling the runtime decision from the loop execution (decision slack). The feasibility of our approach is demonstrated by a toolflow that automatically identifies suitable data-parallel loops and generates code for the FPGA coprocessor of a Convey HC-1. We evaluate the integrated toolflow with representative loops executed for different input data sizes.}}, author = {{Vaz, Gavin Francis and Riebler, Heinrich and Kenter, Tobias and Plessl, Christian}}, issn = {{0045-7906}}, journal = {{Computers and Electrical Engineering}}, pages = {{91--111}}, publisher = {{Elsevier}}, title = {{{Potential and Methods for Embedding Dynamic Offloading Decisions into Application Code}}}, doi = {{10.1016/j.compeleceng.2016.04.021}}, volume = {{55}}, year = {{2016}}, } @inproceedings{168, abstract = {{The use of heterogeneous computing resources, such as Graphics Processing Units or other specialized coprocessors, has become widespread in recent years because of their performance and energy efficiency advantages. Approaches for managing and scheduling tasks to heterogeneous resources are still a subject of research. Although queuing systems have recently been extended to support accelerator resources, a general solution that manages heterogeneous resources at the operating system level to exploit a global view of the system state is still missing. In this paper we present a user-space scheduler that enables task scheduling and migration on heterogeneous processing resources in Linux. Using run queues for available resources, we make scheduling decisions based on the system state and on task characterization from earlier measurements. With a programming pattern that supports the integration of checkpoints into applications, we preempt tasks and migrate them between three very different compute resources. Considering static and dynamic workload scenarios, we show that this approach can gain up to 17% performance, on average 7%, by effectively avoiding idle resources.
We demonstrate that a work-conserving strategy without migration is not a suitable alternative.}}, author = {{Lösch, Achim and Beisel, Tobias and Kenter, Tobias and Plessl, Christian and Platzner, Marco}}, booktitle = {{Proceedings of the 2016 Design, Automation & Test in Europe Conference & Exhibition (DATE)}}, pages = {{912--917}}, publisher = {{EDA Consortium / IEEE}}, title = {{{Performance-centric scheduling with task migration for a heterogeneous compute node in the data center}}}, year = {{2016}}, } @inproceedings{171, author = {{Kenter, Tobias and Vaz, Gavin Francis and Riebler, Heinrich and Plessl, Christian}}, booktitle = {{Workshop on Reconfigurable Computing (WRC)}}, title = {{{Opportunities for deferring application partitioning and accelerator synthesis to runtime (extended abstract)}}}, year = {{2016}}, } @article{1769, abstract = {{Large cylindrical steel test specimens are investigated in simulation using the finite-difference time-domain (FDTD) method, employing pitch-catch measurement setups. Two imaging approaches are presented: the first is based on Claerbout's imaging principle, the second on gradient-based optimization of an objective functional.}}, author = {{Hegler, Sebastian and Statz, Christoph and Mütze, Marco and Mooshofer, Hubert and Goldammer, Matthias and Fendt, Karl and Schwarzer, Stefan and Feldhoff, Kim and Flehmig, Martin and Markwardt, Ulf and E. Nagel, Wolfgang and Schütte, Maria and Walther, Andrea and Meinel, Michael and Basermann, Achim and Plettemeier, Dirk}}, journal = {{tm - Technisches Messen}}, number = {{9}}, pages = {{440--450}}, publisher = {{Walter de Gruyter}}, title = {{{Simulative Ultraschall-Untersuchung von Pitch-Catch-Messanordnungen für große zylindrische Stahl-Prüflinge und gradientenbasierte Bildgebung}}}, doi = {{10.1515/teme-2015-0031}}, volume = {{82}}, year = {{2015}}, } @article{1772, author = {{Torresen, Jim and Plessl, Christian and Yao, Xin}}, journal = {{IEEE Computer}}, keywords = {{self-awareness, self-expression}}, number = {{7}}, pages = {{18--20}}, publisher = {{IEEE Computer Society}}, title = {{{Self-Aware and Self-Expressive Systems – Guest Editors' Introduction}}}, doi = {{10.1109/MC.2015.205}}, volume = {{48}}, year = {{2015}}, } @article{1774, abstract = {{In this article, an efficient numerical method to solve multiobjective optimization problems for fluid flow governed by the Navier-Stokes equations is presented. In order to decrease the computational effort, a reduced-order model is introduced using Proper Orthogonal Decomposition and a corresponding Galerkin Projection. A global, derivative-free multiobjective optimization algorithm is applied to compute the Pareto set (i.e. the set of optimal compromises) for the concurrent objectives of minimizing flow field fluctuations and control cost.
The method is illustrated for a 2D flow around a cylinder at Re = 100.}}, author = {{Peitz, Sebastian and Dellnitz, Michael}}, issn = {{1617-7061}}, journal = {{PAMM}}, number = {{1}}, pages = {{613--614}}, publisher = {{WILEY-VCH Verlag}}, title = {{{Multiobjective Optimization of the Flow Around a Cylinder Using Model Order Reduction}}}, doi = {{10.1002/pamm.201510296}}, volume = {{15}}, year = {{2015}}, } @phdthesis{10624, abstract = {{The use of heterogeneous computing resources, such as graphics processing units or other specialized co-processors, has become widespread in recent years because of their performance and energy efficiency advantages. Operating system approaches that are limited to optimizing CPU usage are no longer sufficient for the efficient utilization of systems that comprise diverse resource types. Enabling task preemption on these architectures and migration of tasks between different resource types at run-time is not only key to improving the performance and energy consumption but also to enabling automatic scheduling methods for heterogeneous compute nodes. This thesis proposes novel techniques for run-time management of heterogeneous resources and for enabling tasks to migrate between diverse hardware. It provides fundamental work towards future operating systems by discussing implications, limitations, and opportunities of heterogeneity and by introducing solutions for energy- and performance-efficient run-time systems. Scheduling methods that utilize heterogeneous systems by use of a centralized scheduler are presented and shown to provide benefits over existing approaches in various case studies.}}, author = {{Beisel, Tobias}}, isbn = {{978-3-8325-4155-2}}, pages = {{183}}, publisher = {{Logos Verlag Berlin GmbH}}, title = {{{Management and Scheduling of Accelerators for Heterogeneous High-Performance Computing}}}, year = {{2015}}, } @article{296, abstract = {{FPGAs are known to permit huge gains in performance and efficiency for suitable applications, but wider adoption still requires reduced design effort and shorter development cycles. In this work, we compare the resulting performance of two design concepts that in different ways promise such increased productivity. As a common starting point, we employ a kernel-centric design approach, where computational hotspots in an application are identified and individually accelerated on FPGA. By means of a complex stereo matching application, we evaluate two fundamentally different design philosophies and approaches for implementing the required kernels on FPGAs. In the first implementation approach, we designed individually specialized data flow kernels in a spatial programming language for a Maxeler FPGA platform; in the alternative design approach, we target a vector coprocessor with large vector lengths, which is implemented as a form of programmable overlay on the application FPGAs of a Convey HC-1. We assess both approaches in terms of overall system performance, raw kernel performance, and performance relative to invested resources. After compensating for the effects of the underlying hardware platforms, the specialized dataflow kernels on the Maxeler platform are around 3x faster than kernels executing on the Convey vector coprocessor.
In our concrete scenario, due to trade-offs between reconfiguration overheads and exposed parallelism, the advantage of specialized dataflow kernels is reduced to around 2.5x.}}, author = {{Kenter, Tobias and Schmitz, Henning and Plessl, Christian}}, journal = {{International Journal of Reconfigurable Computing (IJRC)}}, publisher = {{Hindawi}}, title = {{{Exploring Tradeoffs between Specialized Kernels and a Reusable Overlay in a Stereo-Matching Case Study}}}, doi = {{10.1155/2015/859425}}, volume = {{2015}}, year = {{2015}}, } @inproceedings{303, abstract = {{This paper introduces Binary Acceleration At Runtime (BAAR), an easy-to-use on-the-fly binary acceleration mechanism which aims to tackle the problem of enabling existing software to automatically utilize accelerators at runtime. BAAR is based on the LLVM Compiler Infrastructure and has a client-server architecture. The client runs the program to be accelerated in an environment which allows program analysis and profiling. Program parts which are identified as suitable for the available accelerator are exported and sent to the server. The server optimizes these program parts for the accelerator and provides RPC execution for the client. The client transforms its program to utilize accelerated execution on the server for offloaded program parts. We evaluate our work with a proof-of-concept implementation of BAAR that uses an Intel Xeon Phi 5110P as the acceleration target and performs automatic offloading, parallelization and vectorization of suitable program parts. The practicality of BAAR for real-world examples is shown based on a study of stencil codes. Our results show a speedup of up to 4 without any developer-provided hints and 5.77 with hints over the same code compiled with the Intel Compiler at optimization level O2 and running on an Intel Xeon E5-2670 machine. Based on our insights gained during implementation and evaluation, we outline future directions of research, e.g., offloading more fine-granular program parts than functions, a more sophisticated communication mechanism, or introducing on-stack replacement.}}, author = {{Damschen, Marvin and Plessl, Christian}}, booktitle = {{Proceedings of the 5th International Workshop on Adaptive Self-tuning Computing Systems (ADAPT)}}, title = {{{Easy-to-Use On-The-Fly Binary Program Acceleration on Many-Cores}}}, year = {{2015}}, } @inproceedings{1773, author = {{Schumacher, Jörn and T. Anderson, J. and Borga, A. and Boterenbrood, H. and Chen, H. and Chen, K. and Drake, G. and Francis, D. and Gorini, B. and Lanni, F. and Lehmann-Miotto, Giovanna and Levinson, L. and Narevicius, J. and Plessl, Christian and Roich, A. and Ryu, S. and P. Schreuder, F. and Vandelli, Wainer and Vermeulen, J. and Zhang, J.}}, booktitle = {{Proc. Int. Conf.
on Distributed Event-Based Systems (DEBS)}}, publisher = {{ACM}}, title = {{{Improving Packet Processing Performance in the ATLAS FELIX Project – Analysis and Optimization of a Memory-Bounded Algorithm}}}, doi = {{10.1145/2675743.2771824}}, year = {{2015}}, } @article{1768, author = {{Plessl, Christian and Platzner, Marco and Schreier, Peter J.}}, journal = {{Informatik Spektrum}}, keywords = {{approximate computing, survey}}, number = {{5}}, pages = {{396--399}}, publisher = {{Springer}}, title = {{{Aktuelles Schlagwort: Approximate Computing}}}, doi = {{10.1007/s00287-015-0911-z}}, year = {{2015}}, } @inproceedings{238, abstract = {{In this paper, we study how binary applications can be transparently accelerated with novel heterogeneous computing resources without requiring any manual porting or developer-provided hints. Our work is based on Binary Acceleration At Runtime (BAAR), our previously introduced binary acceleration mechanism that uses the LLVM Compiler Infrastructure. BAAR is designed as a client-server architecture. The client runs the program to be accelerated in an environment, which allows program analysis and profiling and identifies and extracts suitable program parts to be offloaded. The server compiles and optimizes these offloaded program parts for the accelerator and offers access to these functions to the client with a remote procedure call (RPC) interface. Our previous work proved the feasibility of our approach, but also showed that communication time and overheads limit the granularity of functions that can be meaningfully offloaded. In this work, we motivate the importance of a lightweight, high-performance communication between server and client and present a communication mechanism based on the Message Passing Interface (MPI). We evaluate our approach by using an Intel Xeon Phi 5110P as the acceleration target and show that the communication overhead can be reduced from 40% to 10%, thus enabling even small hotspots to benefit from offloading to an accelerator.}}, author = {{Damschen, Marvin and Riebler, Heinrich and Vaz, Gavin Francis and Plessl, Christian}}, booktitle = {{Proceedings of the 2015 Conference on Design, Automation and Test in Europe (DATE)}}, pages = {{1078--1083}}, publisher = {{EDA Consortium / IEEE}}, title = {{{Transparent offloading of computational hotspots from binary code to Xeon Phi}}}, doi = {{10.7873/DATE.2015.1124}}, year = {{2015}}, } @article{1775, abstract = {{The ATLAS experiment at CERN is planning full deployment of a new unified optical link technology for connecting detector front end electronics on the timescale of the LHC Run 4 (2025). It is estimated that roughly 8000 GBT (GigaBit Transceiver) links, with transfer rates up to 10.24 Gbps, will replace existing links used for readout, detector control and distribution of timing and trigger information. A new class of devices will be needed to interface many GBT links to the rest of the trigger, data-acquisition and detector control systems. In this paper FELIX (Front End LInk eXchange) is presented, a PC-based device to route data from and to multiple GBT links via a high-performance general purpose network capable of a total throughput up to O(20 Tbps). FELIX implies architectural changes to the ATLAS data acquisition system, such as the use of industry standard COTS components early in the DAQ chain. 
Additionally, the design and implementation of a FELIX demonstration platform are presented, and hardware and software aspects are discussed.}}, author = {{Anderson, J and Borga, A and Boterenbrood, H and Chen, H and Chen, K and Drake, G and Francis, D and Gorini, B and Lanni, F and Lehmann Miotto, G and Levinson, L and Narevicius, J and Plessl, Christian and Roich, A and Ryu, S and Schreuder, F and Schumacher, Jörn and Vandelli, Wainer and Vermeulen, J and Zhang, J}}, journal = {{Journal of Physics: Conference Series}}, publisher = {{IOP Publishing}}, title = {{{FELIX: a High-Throughput Network Approach for Interfacing to Front End Electronics for ATLAS Upgrades}}}, doi = {{10.1088/1742-6596/664/8/082050}}, volume = {{664}}, year = {{2015}}, } @inproceedings{1781, abstract = {{In light of an increasing awareness of environmental challenges, extensive research is underway to develop new light-weight materials. A problem arising with these materials is their increased response to vibration. This can be solved using a new composite material that contains embedded hollow spheres that are partially filled with particles. Progress on the adaptation of molecular dynamics towards a particle-based numerical simulation of this material is reported. This includes the treatment of specific boundary conditions and the adaptation of the force computation. First results are presented that showcase the damping properties of such particle-filled spheres in a bouncing experiment.}}, author = {{Steinle, Tobias and Vrabec, Jadran and Walther, Andrea}}, booktitle = {{Proc. Modeling, Simulation and Optimization of Complex Processes (HPSC)}}, editor = {{Bock, Hans Georg and Hoang, Xuan Phu and Rannacher, Rolf and Schlöder, Johannes P.}}, isbn = {{978-3-319-09063-4}}, pages = {{233--243}}, publisher = {{Springer International Publishing}}, title = {{{Numerical Simulation of the Damping Behavior of Particle-Filled Hollow Spheres}}}, doi = {{10.1007/978-3-319-09063-4_19}}, year = {{2014}}, } @inproceedings{1782, author = {{Graf, Tobias and Schaefers, Lars and Platzner, Marco}}, booktitle = {{Proc. Conf. on Computers and Games (CG)}}, number = {{8427}}, pages = {{14--25}}, publisher = {{Springer}}, title = {{{On Semeai Detection in Monte-Carlo Go}}}, doi = {{10.1007/978-3-319-09165-5_2}}, year = {{2014}}, } @inbook{335, abstract = {{In the field of computer systems, drawing the boundary between hardware and software is a central problem. Over the past decades, this boundary has not only determined the development of computer systems but has also shaped the structure of education in the computing sciences and even led to the emergence of new research directions. In this contribution we examine shifts of the boundary between hardware and software and discuss three qualitatively different forms of such shifts. We begin with the development of computer systems in the last century and the emergence of this boundary, which first differentiated hardware and software as independent products. We then turn to the question of which functions of a computer system are better realized in hardware and which in software, a question that led to the formation of a dedicated research field, so-called hardware/software co-design, at the beginning of the 1990s.
In hardware/software co-design, functions are shifted across the boundary between hardware and software during the development of a product in order to optimize product properties. In the finished and deployed product, by contrast, we can observe a fixed boundary between hardware and software. In the third part of this contribution we present self-adaptive systems, a highly topical research direction. In our context, self-adaptation means that a system autonomously shifts functions across the boundary between hardware and software at runtime. Such systems rely on reconfigurable hardware, a relatively new technology that allows the hardware of a computer to be modified at runtime. This technology leads to a permeable boundary between hardware and software and thus dissolves the conventional notion of fixed hardware and flexible software.}}, author = {{Platzner, Marco and Plessl, Christian}}, booktitle = {{Logiken strukturbildender Prozesse: Automatismen}}, editor = {{Künsemöller, Jörn and Eke, Norbert Otto and Foit, Lioba and Kaerlein, Timo}}, isbn = {{978-3-7705-5730-1}}, pages = {{123--144}}, publisher = {{Wilhelm Fink}}, title = {{{Verschiebungen an der Grenze zwischen Hardware und Software}}}, year = {{2014}}, } @inproceedings{388, abstract = {{In order to leverage the use of reconfigurable architectures in general-purpose computing, quick and automated methods to find suitable accelerator designs are required. We tackle this challenge in both regards. In order to avoid long synthesis times, we target a vector coprocessor, implemented on the FPGAs of a Convey HC-1. Previous studies showed that existing tools were not able to accelerate a real-world application with low effort. We present a toolflow to automatically identify suitable loops for vectorization, generate a corresponding hardware/software bipartition, and generate coprocessor code. Where applicable, we leverage outer-loop vectorization. We evaluate our tools with a set of characteristic loops, systematically analyzing different dependency and data layout properties.}}, author = {{Kenter, Tobias and Vaz, Gavin Francis and Plessl, Christian}}, booktitle = {{Proceedings of the International Symposium on Reconfigurable Computing: Architectures, Tools, and Applications (ARC)}}, pages = {{144--155}}, publisher = {{Springer International Publishing}}, title = {{{Partitioning and Vectorizing Binary Applications for a Reconfigurable Vector Computer}}}, doi = {{10.1007/978-3-319-05960-0_13}}, volume = {{8405}}, year = {{2014}}, } @article{363, abstract = {{Due to the continuously shrinking device structures and increasing densities of FPGAs, thermal aspects have become the new focus for many research projects over the last years. Most researchers rely on temperature simulations to evaluate their novel thermal management techniques. However, these temperature simulations require a high computational effort if a detailed thermal model is used and their accuracies are often unclear. In contrast to simulations, the use of synthetic heat sources allows for experimental evaluation of temperature management methods. In this paper we investigate the creation of significant rises in temperature on modern FPGAs to enable future evaluation of thermal management techniques based on experiments.
To that end, we have developed seven different heat-generating cores that use different subsets of FPGA resources. Our experimental results show that, according to external temperature probes connected to the FPGA's heat sink, we can increase the temperature by an average of 81 °C. This corresponds to an average increase of 156.3 °C as measured by the built-in thermal diodes of our Virtex-5 FPGAs in less than 30 min by only utilizing about 21 percent of the slices.}}, author = {{Agne, Andreas and Hangmann, Hendrik and Happe, Markus and Platzner, Marco and Plessl, Christian}}, journal = {{Microprocessors and Microsystems}}, number = {{8, Part B}}, pages = {{911--919}}, publisher = {{Elsevier}}, title = {{{Seven Recipes for Setting Your FPGA on Fire – A Cookbook on Heat Generators}}}, doi = {{10.1016/j.micpro.2013.12.001}}, volume = {{38}}, year = {{2014}}, } @inproceedings{377, abstract = {{In this paper, we study how AES key schedules can be reconstructed from decayed memory. This operation is a crucial and time consuming operation when trying to break encryption systems with cold-boot attacks. In software, the reconstruction of the AES master key can be performed using a recursive, branch-and-bound tree-search algorithm that exploits redundancies in the key schedule for constraining the search space. In this work, we investigate how this branch-and-bound algorithm can be accelerated with FPGAs. We translated the recursive search procedure to a state machine with an explicit stack for each recursion level and created optimized datapaths to accelerate in particular the processing of the most frequently accessed tree levels. We support two different decay models, of which especially the more realistic non-idealized asymmetric decay model causes very high runtimes in software. Our implementation on a Maxeler dataflow computing system outperforms a software implementation for this model by up to 27x, which makes cold-boot attacks against AES practical even for high error rates.}}, author = {{Riebler, Heinrich and Kenter, Tobias and Plessl, Christian and Sorge, Christoph}}, booktitle = {{Proceedings of Field-Programmable Custom Computing Machines (FCCM)}}, keywords = {{coldboot}}, pages = {{222--229}}, publisher = {{IEEE}}, title = {{{Reconstructing AES Key Schedules from Decayed Memory with FPGAs}}}, doi = {{10.1109/FCCM.2014.67}}, year = {{2014}}, } @article{365, abstract = {{Self-aware computing is a paradigm for structuring and simplifying the design and operation of computing systems that face unprecedented levels of system dynamics and thus require novel forms of adaptivity. The generality of the paradigm makes it applicable to many types of computing systems and, previously, researchers started to introduce concepts of self-awareness to multicore architectures. In our work we build on a recent reference architectural framework as a model for self-aware computing and instantiate it for an FPGA-based heterogeneous multicore running the ReconOS reconfigurable architecture and operating system. After presenting the model for self-aware computing and ReconOS, we demonstrate with a case study how a multicore application built on the principle of self-awareness autonomously adapts to changes in the workload and system state.
Our work shows that the reference architectural framework as a model for self-aware computing can be practically applied and allows us to structure and simplify the design process, which is essential for designing complex future computing systems.}}, author = {{Agne, Andreas and Happe, Markus and Lösch, Achim and Plessl, Christian and Platzner, Marco}}, journal = {{ACM Transactions on Reconfigurable Technology and Systems (TRETS)}}, number = {{2}}, publisher = {{ACM}}, title = {{{Self-awareness as a Model for Designing and Operating Heterogeneous Multicores}}}, doi = {{10.1145/2617596}}, volume = {{7}}, year = {{2014}}, } @article{328, abstract = {{The ReconOS operating system for reconfigurable computing offers a unified multi-threaded programming model and operating system services for threads executing in software and threads mapped to reconfigurable hardware. The operating system interface allows hardware threads to interact with software threads using well-known mechanisms such as semaphores, mutexes, condition variables, and message queues. By semantically integrating hardware accelerators into a standard operating system environment, ReconOS allows for rapid design space exploration, supports a structured application development process and improves the portability of applications.}}, author = {{Agne, Andreas and Happe, Markus and Keller, Ariane and Lübbers, Enno and Plattner, Bernhard and Platzner, Marco and Plessl, Christian}}, journal = {{IEEE Micro}}, number = {{1}}, pages = {{60--71}}, publisher = {{IEEE}}, title = {{{ReconOS - An Operating System Approach for Reconfigurable Computing}}}, doi = {{10.1109/MM.2013.110}}, volume = {{34}}, year = {{2014}}, } @inproceedings{1778, author = {{C. Durelli, Gianluca and Pogliani, Marcello and Miele, Antonio and Plessl, Christian and Riebler, Heinrich and Vaz, Gavin Francis and D. Santambrogio, Marco and Bolchini, Cristiana}}, booktitle = {{Proc. Int. Symp. on Parallel and Distributed Processing with Applications (ISPA)}}, pages = {{142--149}}, publisher = {{IEEE}}, title = {{{Runtime Resource Management in Heterogeneous System Architectures: The SAVE Approach}}}, doi = {{10.1109/ISPA.2014.27}}, year = {{2014}}, } @inproceedings{439, abstract = {{Reconfigurable architectures provide an opportunity to accelerate a wide range of applications, frequently by exploiting data-parallelism, where the same operations are homogeneously executed on a (large) set of data. However, when the sequential code is executed on a host CPU and only data-parallel loops are executed on an FPGA coprocessor, a sufficiently large number of loop iterations (trip counts) is required, such that the control- and data-transfer overheads to the coprocessor can be amortized. However, the trip count of large data-parallel loops is frequently not known at compile time, but only at runtime just before entering a loop. Therefore, we propose to generate code both for the CPU and the coprocessor, and to defer the decision where to execute the appropriate code to the runtime of the application when the trip count of the loop can be determined just at runtime. We demonstrate how an LLVM compiler-based toolflow can automatically insert appropriate decision blocks into the application code. Analyzing popular benchmark suites, we show that this kind of runtime decision is often applicable. The practical feasibility of our approach is demonstrated by a toolflow that automatically identifies loops suitable for vectorization and generates code for the FPGA coprocessor of a Convey HC-1.
The toolflow adds decisions based on a comparison of the runtime-computed trip counts to thresholds for specific loops and also includes support to move just the required data to the coprocessor. We evaluate the integrated toolflow with characteristic loops executed on different input data sizes.}}, author = {{Vaz, Gavin Francis and Riebler, Heinrich and Kenter, Tobias and Plessl, Christian}}, booktitle = {{Proceedings of the International Conference on ReConFigurable Computing and FPGAs (ReConFig)}}, pages = {{1--8}}, publisher = {{IEEE}}, title = {{{Deferring Accelerator Offloading Decisions to Application Runtime}}}, doi = {{10.1109/ReConFig.2014.7032509}}, year = {{2014}}, } @inproceedings{406, abstract = {{Stereo-matching algorithms recently received a lot of attention from the FPGA acceleration community. Presented solutions range from simple, very resource-efficient systems with modest matching quality for small embedded systems to sophisticated algorithms with several processing steps, implemented on big FPGAs. In order to achieve high throughput, most implementations strongly focus on pipelining and data reuse between different computation steps. This approach leads to high efficiency, but limits the supported computation patterns and, due to the high integration of the implementation, adaptations to the algorithm are difficult. In this work, we present a stereo-matching implementation that starts by offloading individual kernels from the CPU to the FPGA. Between subsequent compute steps on the FPGA, data is stored off-chip in on-board memory of the FPGA accelerator card. This enables us to accelerate the AD-census algorithm with cross-based aggregation and scanline optimization for the first time without algorithmic changes and for up to full HD image dimensions. Analyzing throughput and bandwidth requirements, we outline some trade-offs that are involved with this approach, compared to tighter integration of more kernel loops into one design.}}, author = {{Kenter, Tobias and Schmitz, Henning and Plessl, Christian}}, booktitle = {{Proceedings of the International Conference on ReConFigurable Computing and FPGAs (ReConFig)}}, pages = {{1--8}}, publisher = {{IEEE}}, title = {{{Kernel-Centric Acceleration of High Accuracy Stereo-Matching}}}, doi = {{10.1109/ReConFig.2014.7032535}}, year = {{2014}}, } @inproceedings{1780, author = {{C. Durelli, Gianluca and Coppola, Marcello and Djafarian, Karim and Kornaros, George and Miele, Antonio and Paolino, Michele and Pell, Oliver and Plessl, Christian and D. Santambrogio, Marco and Bolchini, Cristiana}}, booktitle = {{Proc. Int. Conf. on Reconfigurable Computing: Architectures, Tools and Applications (ARC)}}, publisher = {{Springer}}, title = {{{SAVE: Towards efficient resource management in heterogeneous system architectures}}}, doi = {{10.1007/978-3-319-05960-0_38}}, year = {{2014}}, } @article{1779, author = {{Giefers, Heiner and Plessl, Christian and Förstner, Jens}}, issn = {{0163-5964}}, journal = {{ACM SIGARCH Computer Architecture News}}, keywords = {{funding-maxup, tet_topic_hpc}}, number = {{5}}, pages = {{65--70}}, publisher = {{ACM}}, title = {{{Accelerating Finite Difference Time Domain Simulations with Reconfigurable Dataflow Computers}}}, doi = {{10.1145/2641361.2641372}}, volume = {{41}}, year = {{2014}}, } @inproceedings{1784, author = {{Kaiser, Jürgen and Meister, Dirk and Gottfried, Viktor and Brinkmann, André}}, booktitle = {{Proc. IEEE Int. Conf.
on Networking, Architecture and Storage (NAS)}}, pages = {{88--97}}, publisher = {{IEEE Computer Society}}, title = {{{MCD: Overcoming the Data Download Bottleneck in Data Centers}}}, doi = {{10.1109/NAS.2013.18}}, year = {{2013}}, } @inproceedings{1786, author = {{Kasap, Server and Redif, Soydan}}, booktitle = {{Proc. IEEE Signal Processing and Communications Applications Conf. (SIU)}}, publisher = {{IEEE}}, title = {{{FPGA Implementation of a Second-Order Convolutive Blind Signal Separation Algorithm}}}, doi = {{10.1109/SIU.2013.6531530}}, year = {{2013}}, } @inproceedings{1788, author = {{Berenbrink, Petra and Brinkmann, André and Friedetzky, Tom and Meister, Dirk and Nagel, Lars}}, booktitle = {{Proc. Int. Symp. on Parallel and Distributed Processing Workshops (IPDPSW)}}, publisher = {{IEEE}}, title = {{{Distributing Storage in Cloud Environments}}}, doi = {{10.1109/IPDPSW.2013.148}}, year = {{2013}}, } @book{1790, author = {{Niehörster, Oliver}}, isbn = {{978-3-8440-1735-9}}, publisher = {{Shaker}}, title = {{{Autonomous Resource Management in Dynamic Data Centers}}}, year = {{2013}}, }