@unpublished{43439, abstract = {{This preprint makes the claim of having computed the $9^{th}$ Dedekind Number. This was done by building an efficient FPGA Accelerator for the core operation of the process, and parallelizing it on the Noctua 2 Supercluster at Paderborn University. The resulting value is 286386577668298411128469151667598498812366. This value can be verified in two steps. We have made the data file containing the 490M results available, each of which can be verified separately on CPU, and the whole file sums to our proposed value.}}, author = {{Van Hirtum, Lennart and De Causmaecker, Patrick and Goemaere, Jens and Kenter, Tobias and Riebler, Heinrich and Lass, Michael and Plessl, Christian}}, booktitle = {{arXiv:2304.03039}}, title = {{{A computation of D(9) using FPGA Supercomputing}}}, year = {{2023}}, } @article{45361, abstract = {{ The non-orthogonal local submatrix method applied to electronic structure–based molecular dynamics simulations is shown to exceed 1.1 EFLOP/s in FP16/FP32-mixed floating-point arithmetic when using 4400 NVIDIA A100 GPUs of the Perlmutter system. This is enabled by a modification of the original method that pushes the sustained fraction of the peak performance to about 80%. Example calculations are performed for SARS-CoV-2 spike proteins with up to 83 million atoms. }}, author = {{Schade, Robert and Kenter, Tobias and Elgabarty, Hossam and Lass, Michael and Kühne, Thomas and Plessl, Christian}}, issn = {{1094-3420}}, journal = {{The International Journal of High Performance Computing Applications}}, keywords = {{Hardware and Architecture, Theoretical Computer Science, Software}}, publisher = {{SAGE Publications}}, title = {{{Breaking the exascale barrier for the electronic structure problem in ab-initio molecular dynamics}}}, doi = {{10.1177/10943420231177631}}, year = {{2023}}, } @inbook{45893, author = {{Hansmeier, Tim and Kenter, Tobias and Meyer, Marius and Riebler, Heinrich and Platzner, Marco and Plessl, Christian}}, booktitle = {{On-The-Fly Computing -- Individualized IT-services in dynamic markets}}, editor = {{Haake, Claus-Jochen and Meyer auf der Heide, Friedhelm and Platzner, Marco and Wachsmuth, Henning and Wehrheim, Heike}}, pages = {{165--182}}, publisher = {{Heinz Nixdorf Institut, Universität Paderborn}}, title = {{{Compute Centers I: Heterogeneous Execution Environments}}}, doi = {{10.5281/zenodo.8068642}}, volume = {{412}}, year = {{2023}}, } @inproceedings{43228, abstract = {{The computation of electron repulsion integrals (ERIs) over Gaussian-type orbitals (GTOs) is a challenging problem in quantum-mechanics-based atomistic simulations. In practical simulations, several trillions of ERIs may have to be computed for every time step. In this work, we investigate FPGAs as accelerators for the ERI computation. We use template parameters, here within the Intel oneAPI tool flow, to create customized designs for 256 different ERI quartet classes, based on their orbitals. To maximize data reuse, all intermediates are buffered in FPGA on-chip memory with customized layout. The pre-calculation of intermediates also helps to overcome data dependencies caused by multi-dimensional recurrence relations. The involved loop structures are partially or even fully unrolled for high throughput of FPGA kernels. Furthermore, a lossy compression algorithm utilizing arbitrary bitwidth integers is integrated in the FPGA kernels. 
To the best of our knowledge, this is the first work on ERI computation on FPGAs that supports more than just the single most basic quartet class. Also, the integration of ERI computation and compression is a novelty that is not even covered by CPU or GPU libraries so far. Our evaluation shows that using 16-bit integers for the ERI compression, the fastest FPGA kernels exceed the performance of 10 GERIS ($10 \times 10^9$ ERIs per second) on one Intel Stratix 10 GX 2800 FPGA, with maximum absolute errors around $10^{-7}$ - $10^{-5}$ Hartree. The measured throughput can be accurately explained by a performance model. The FPGA kernels deployed on 2 FPGAs outperform similar computations using the widely used libint reference on a two-socket server with 40 Xeon Gold 6148 CPU cores of the same process technology by factors up to 6.0x and on a new two-socket server with 128 EPYC 7713 CPU cores by up to 1.9x.}}, author = {{Wu, Xin and Kenter, Tobias and Schade, Robert and Kühne, Thomas and Plessl, Christian}}, booktitle = {{2023 IEEE 31st Annual International Symposium on Field-Programmable Custom Computing Machines (FCCM)}}, pages = {{162--173}}, title = {{{Computing and Compressing Electron Repulsion Integrals on FPGAs}}}, doi = {{10.1109/FCCM57271.2023.00026}}, year = {{2023}}, } @inproceedings{46189, author = {{Prouveur, Charles and Haefele, Matthieu and Kenter, Tobias and Voss, Nils}}, booktitle = {{Proceedings of the Platform for Advanced Scientific Computing Conference}}, publisher = {{ACM}}, title = {{{FPGA Acceleration for HPC Supercapacitor Simulations}}}, doi = {{10.1145/3592979.3593419}}, year = {{2023}}, } @unpublished{50172, abstract = {{Viscous hydrodynamics serves as a successful mesoscopic description of the Quark-Gluon Plasma produced in relativistic heavy-ion collisions. In order to investigate how such an effective description emerges from the underlying microscopic dynamics, we calculate the hydrodynamic and non-hydrodynamic modes of linear response in the sound channel from a first-principle calculation in kinetic theory. We do this with a new approach wherein we discretize the collision kernel to directly calculate eigenvalues and eigenmodes of the evolution operator. This allows us to study the Green's functions at any point in the complex frequency space. Our study focuses on scalar theory with quartic interaction and we find that the analytic structure of Green's functions in the complex plane is far more complicated than just poles or cuts, which is a first step towards an equivalent study in QCD kinetic theory.}}, author = {{Ochsenfeld, Stephan and Schlichting, Sören}}, booktitle = {{arXiv:2308.04491}}, title = {{{Hydrodynamic and Non-hydrodynamic Excitations in Kinetic Theory -- A Numerical Analysis in Scalar Field Theory}}}, year = {{2023}}, } @unpublished{50221, abstract = {{Memory Gym presents a suite of 2D partially observable environments, namely Mortar Mayhem, Mystery Path, and Searing Spotlights, designed to benchmark memory capabilities in decision-making agents. These environments, originally with finite tasks, are expanded into innovative, endless formats, mirroring the escalating challenges of cumulative memory games such as ``I packed my bag''. This progression in task design shifts the focus from merely assessing sample efficiency to also probing the levels of memory effectiveness in dynamic, prolonged scenarios.
To address the gap in available memory-based Deep Reinforcement Learning baselines, we introduce an implementation that integrates Transformer-XL (TrXL) with Proximal Policy Optimization. This approach utilizes TrXL as a form of episodic memory, employing a sliding window technique. Our comparative study between the Gated Recurrent Unit (GRU) and TrXL reveals varied performances across different settings. TrXL, on the finite environments, demonstrates superior sample efficiency in Mystery Path and outperforms in Mortar Mayhem. However, GRU is more efficient on Searing Spotlights. Most notably, in all endless tasks, GRU makes a remarkable resurgence, consistently outperforming TrXL by significant margins. Website and Source Code: https://github.com/MarcoMeter/endless-memory-gym/}}, author = {{Pleines, Marco and Pallasch, Matthias and Zimmer, Frank and Preuss, Mike}}, booktitle = {{arXiv:2309.17207}}, title = {{{Memory Gym: Towards Endless Tasks to Benchmark Memory Capabilities of Agents}}}, year = {{2023}}, } @article{38041, abstract = {{While FPGA accelerator boards and their respective high-level design tools are maturing, there is still a lack of multi-FPGA applications, libraries, and not least, benchmarks and reference implementations towards sustained HPC usage of these devices. As in the early days of GPUs in HPC, for workloads that can reasonably be decoupled into loosely coupled working sets, multi-accelerator support can be achieved by using standard communication interfaces like MPI on the host side. However, for performance and productivity, some applications can profit from a tighter coupling of the accelerators. FPGAs offer unique opportunities here when extending the dataflow characteristics to their communication interfaces. In this work, we extend the HPCC FPGA benchmark suite by multi-FPGA support and three missing benchmarks that particularly characterize or stress inter-device communication: b_eff, PTRANS, and LINPACK. With all benchmarks implemented for current boards with Intel and Xilinx FPGAs, we established a baseline for multi-FPGA performance. Additionally, for the communication-centric benchmarks, we explored the potential of direct FPGA-to-FPGA communication with a circuit-switched inter-FPGA network that is currently only available for one of the boards. 
The evaluation with parallel execution on up to 26 FPGA boards makes use of one of the largest academic FPGA installations.}}, author = {{Meyer, Marius and Kenter, Tobias and Plessl, Christian}}, issn = {{1936-7406}}, journal = {{ACM Transactions on Reconfigurable Technology and Systems}}, keywords = {{General Computer Science}}, publisher = {{Association for Computing Machinery (ACM)}}, title = {{{Multi-FPGA Designs and Scaling of HPC Challenge Benchmarks via MPI and Circuit-Switched Inter-FPGA Networks}}}, doi = {{10.1145/3576200}}, year = {{2023}}, } @inproceedings{46190, author = {{Opdenhövel, Jan-Oliver and Plessl, Christian and Kenter, Tobias}}, booktitle = {{Proceedings of the 13th International Symposium on Highly Efficient Accelerators and Reconfigurable Technologies}}, publisher = {{ACM}}, title = {{{Mutation Tree Reconstruction of Tumor Cells on FPGAs Using a Bit-Level Matrix Representation}}}, doi = {{10.1145/3597031.3597050}}, year = {{2023}}, } @inproceedings{46188, author = {{Faj, Jennifer and Kenter, Tobias and Faghih-Naini, Sara and Plessl, Christian and Aizinger, Vadym}}, booktitle = {{Proceedings of the Platform for Advanced Scientific Computing Conference}}, publisher = {{ACM}}, title = {{{Scalable Multi-FPGA Design of a Discontinuous Galerkin Shallow-Water Model on Unstructured Meshes}}}, doi = {{10.1145/3592979.3593407}}, year = {{2023}}, } @inbook{46191, author = {{Alt, Christoph and Kenter, Tobias and Faghih-Naini, Sara and Faj, Jennifer and Opdenhövel, Jan-Oliver and Plessl, Christian and Aizinger, Vadym and Hönig, Jan and Köstler, Harald}}, booktitle = {{Lecture Notes in Computer Science}}, isbn = {{9783031320408}}, issn = {{0302-9743}}, publisher = {{Springer Nature Switzerland}}, title = {{{Shallow Water DG Simulations on FPGAs: Design and Comparison of a Novel Code Generation Pipeline}}}, doi = {{10.1007/978-3-031-32041-5_5}}, year = {{2023}}, } @article{46120, abstract = {{The rise of exascale supercomputers has fueled competition among GPU vendors, driving lattice QCD developers to write code that supports multiple APIs. Moreover, new developments in algorithms and physics research require frequent updates to existing software. These challenges have to be balanced against constantly changing personnel. At the same time, there is a wide range of applications for HISQ fermions in QCD studies. This situation encourages the development of software featuring a HISQ action that is flexible, high-performing, open source, easy to use, and easy to adapt. In this technical paper, we explain the design strategy, provide implementation details, list available algorithms and modules, and show key performance indicators for SIMULATeQCD, a simple multi-GPU lattice code for large-scale QCD calculations, mainly developed and used by the HotQCD collaboration. The code is publicly available on GitHub.}}, author = {{Mazur, Lukas and Bollweg, Dennis and Clarke, David A. and Altenkort, Luis and Kaczmarek, Olaf and Larsen, Rasmus and Shu, Hai-Tao and Goswami, Jishnu and Scior, Philipp and Sandmeyer, Hauke and Neumann, Marius and Dick, Henrik and Ali, Sajid and Kim, Jangho and Schmidt, Christian and Petreczky, Peter and Mukherjee, Swagato}}, journal = {{Computer Physics Communications}}, title = {{{SIMULATeQCD: A simple multi-GPU lattice code for QCD calculations}}}, doi = {{10.48550/ARXIV.2306.01098}}, year = {{2023}}, } @article{46119, author = {{Altenkort, Luis and Eller, Alexander M. and Francis, Anthony and Kaczmarek, Olaf and Mazur, Lukas and Moore, Guy D. 
and Shu, Hai-Tao}}, issn = {{2470-0010}}, journal = {{Physical Review D}}, number = {{1}}, publisher = {{American Physical Society (APS)}}, title = {{{Viscosity of pure-glue QCD from the lattice}}}, doi = {{10.1103/physrevd.108.014503}}, volume = {{108}}, year = {{2023}}, } @inproceedings{46193, author = {{Karp, Martin and Podobas, Artur and Kenter, Tobias and Jansson, Niclas and Plessl, Christian and Schlatter, Philipp and Markidis, Stefano}}, booktitle = {{International Conference on High Performance Computing in Asia-Pacific Region}}, publisher = {{ACM}}, title = {{{A High-Fidelity Flow Solver for Unstructured Meshes on Field-Programmable Gate Arrays: Design, Evaluation, and Future Challenges}}}, doi = {{10.1145/3492805.3492808}}, year = {{2022}}, } @unpublished{36879, abstract = {{The Julia programming language has evolved into a modern alternative to fill existing gaps in scientific computing and data science applications. Julia leverages a unified and coordinated single-language and ecosystem paradigm and has a proven track record of achieving high performance without sacrificing user productivity. These aspects make Julia a viable alternative to high-performance computing's (HPC's) existing and increasingly costly many-body workflow composition strategy in which traditional HPC languages (e.g., Fortran, C, C++) are used for simulations, and higher-level languages (e.g., Python, R, MATLAB) are used for data analysis and interactive computing. Julia's rapid growth in language capabilities, package ecosystem, and community make it a promising universal language for HPC. This paper presents the views of a multidisciplinary group of researchers from academia, government, and industry that advocate for an HPC software development paradigm that emphasizes developer productivity, workflow portability, and low barriers for entry. We believe that the Julia programming language, its ecosystem, and its community provide modern and powerful capabilities that enable this group's objectives. Crucially, we believe that Julia can provide a feasible and less costly approach to programming scientific applications and workflows that target HPC facilities. In this work, we examine the current practice and role of Julia as a common, end-to-end programming model to address major challenges in scientific reproducibility, data-driven AI/machine learning, co-design and workflows, scalability and performance portability in heterogeneous computing, network communication, data management, and community education. 
As a result, the diversification of current investments to fulfill the needs of the upcoming decade is crucial as more supercomputing centers prepare for the exascale era.}}, author = {{Churavy, Valentin and Godoy, William F and Bauer, Carsten and Ranocha, Hendrik and Schlottke-Lakemper, Michael and Räss, Ludovic and Blaschke, Johannes and Giordano, Mosè and Schnetter, Erik and Omlin, Samuel and Vetter, Jeffrey S and Edelman, Alan}}, title = {{{Bridging HPC Communities through the Julia Programming Language}}}, year = {{2022}}, } @phdthesis{32414, author = {{Lass, Michael}}, publisher = {{Universität Paderborn}}, title = {{{Bringing Massive Parallelism and Hardware Acceleration to Linear Scaling Density Functional Theory Through Targeted Approximations}}}, doi = {{10.17619/UNIPB/1-1281}}, year = {{2022}}, } @unpublished{32404, abstract = {{The CP2K program package, which can be considered as the swiss army knife of atomistic simulations, is presented with a special emphasis on ab-initio molecular dynamics using the second-generation Car-Parrinello method. After outlining current and near-term development efforts with regards to massively parallel low-scaling post-Hartree-Fock and eigenvalue solvers, novel approaches on how we plan to take full advantage of future low-precision hardware architectures are introduced. Our focus here is on combining our submatrix method with the approximate computing paradigm to address the immanent exascale era.}}, author = {{Kühne, Thomas and Plessl, Christian and Schade, Robert and Schütt, Ole}}, booktitle = {{arXiv:2205.14741}}, title = {{{CP2K on the road to exascale}}}, year = {{2022}}, } @article{32234, author = {{Wojciechowski, M}}, issn = {{2352-3409}}, journal = {{Data Brief}}, pages = {{108318}}, title = {{{Dataset for random uniform distributions of 2D circles and 3D spheres.}}}, volume = {{43}}, year = {{2022}}, } @unpublished{32177, abstract = {{We investigate the early time development of the anisotropic transverse flow and spatial eccentricities of a fireball with various particle-based transport approaches using a fixed initial condition. In numerical simulations ranging from the quasi-collisionless case to the hydrodynamic regime, we find that the onset of $v_n$ and of related measures of anisotropic flow can be described with a simple power-law ansatz, with an exponent that depends on the amount of rescatterings in the system. In the few-rescatterings regime we perform semi-analytical calculations, based on a systematic expansion in powers of time and the cross section, which can reproduce the numerical findings.}}, author = {{Borghini, Nicolas and Borrell, Marc and Roch, Hendrik}}, booktitle = {{arXiv:2201.13294}}, title = {{{Early time behavior of spatial and momentum anisotropies in kinetic theory across different Knudsen numbers}}}, year = {{2022}}, } @unpublished{32178, abstract = {{We test the ability of the "escape mechanism" to create the anisotropic flow observed in high-energy nuclear collisions. We compare the flow harmonics $v_n$ in the few-rescatterings regime from two types of transport simulations, with $2\to 2$ and $2\to 0$ collision kernels respectively, and from analytical calculations neglecting the gain term of the Boltzmann equation. 
We find that the even flow harmonics are similar in the three approaches, while the odd harmonics differ significantly.}}, author = {{Bachmann, Benedikt and Borghini, Nicolas and Feld, Nina and Roch, Hendrik}}, booktitle = {{arXiv:2203.13306}}, title = {{{Even anisotropic-flow harmonics are from Venus, odd ones are from Mars}}}, year = {{2022}}, } @article{27364, author = {{Meyer, Marius and Kenter, Tobias and Plessl, Christian}}, issn = {{0743-7315}}, journal = {{Journal of Parallel and Distributed Computing}}, title = {{{In-depth FPGA Accelerator Performance Evaluation with Single Node Benchmarks from the HPC Challenge Benchmark Suite for Intel and Xilinx FPGAs using OpenCL}}}, doi = {{10.1016/j.jpdc.2021.10.007}}, year = {{2022}}, } @article{46121, author = {{Altenkort, Luis and Eller, Alexander M. and Kaczmarek, O. and Mazur, Lukas and Moore, Guy D. and Shu, Hai-Tao}}, issn = {{2470-0010}}, journal = {{Physical Review D}}, number = {{9}}, publisher = {{American Physical Society (APS)}}, title = {{{Lattice QCD noise reduction for bosonic correlators through blocking}}}, doi = {{10.1103/physrevd.105.094505}}, volume = {{105}}, year = {{2022}}, } @article{32183, author = {{Hou, W and Yao, Y and Li, Y and Peng, B and Shi, K and Zhou, Z and Pan, J and Liu, M and Hu, J}}, issn = {{2095-025x}}, journal = {{Frontiers of materials science}}, number = {{1}}, title = {{{Linearly shifting ferromagnetic resonance response of La0.7Sr0.3MnO3 thin film for body temperature sensors}}}, volume = {{16}}, year = {{2022}}, } @article{40523, abstract = {{AbstractTailored nanoscale quantum light sources, matching the specific needs of use cases, are crucial building blocks for photonic quantum technologies. Several different approaches to realize solid-state quantum emitters with high performance have been pursued and different concepts for energy tuning have been established. However, the properties of the emitted photons are always defined by the individual quantum emitter and can therefore not be controlled with full flexibility. Here we introduce an all-optical nonlinear method to tailor and control the single photon emission. We demonstrate a laser-controlled down-conversion process from an excited state of a semiconductor quantum three-level system. Based on this concept, we realize energy tuning and polarization control of the single photon emission with a control-laser field. Our results mark an important step towards tailored single photon emission from a photonic quantum system based on quantum optical principles.}}, author = {{Jonas, B. and Heinze, Dirk Florian and Schöll, E. and Kallert, P. and Langer, T. and Krehs, S. and Widhalm, A. and Jöns, Klaus and Reuter, Dirk and Schumacher, Stefan and Zrenner, Artur}}, issn = {{2041-1723}}, journal = {{Nature Communications}}, keywords = {{General Physics and Astronomy, General Biochemistry, Genetics and Molecular Biology, General Chemistry, Multidisciplinary}}, number = {{1}}, publisher = {{Springer Science and Business Media LLC}}, title = {{{Nonlinear down-conversion in a single quantum dot}}}, doi = {{10.1038/s41467-022-28993-3}}, volume = {{13}}, year = {{2022}}, } @article{33226, abstract = {{A parallel hybrid quantum-classical algorithm for the solution of the quantum-chemical ground-state energy problem on gate-based quantum computers is presented. This approach is based on the reduced density-matrix functional theory (RDMFT) formulation of the electronic structure problem. 
For that purpose, the density-matrix functional of the full system is decomposed into an indirectly coupled sum of density-matrix functionals for all its subsystems using the adaptive cluster approximation to RDMFT. The approximations involved in the decomposition and the adaptive cluster approximation itself can be systematically converged to the exact result. The solutions for the density-matrix functionals of the effective subsystems involve a constrained minimization over many-particle states that are approximated by parametrized trial states on the quantum computer similarly to the variational quantum eigensolver. The independence of the density-matrix functionals of the effective subsystems introduces a new level of parallelization and allows for the computational treatment of much larger molecules on a quantum computer with a given qubit count. In addition, techniques are presented for the proposed algorithm that reduce the qubit count, the number of quantum programs, as well as their depth. The evaluation of a density-matrix functional as the essential part of our approach is demonstrated for Hubbard-like systems on IBM quantum computers based on superconducting transmon qubits.}}, author = {{Schade, Robert and Bauer, Carsten and Tamoev, Konstantin and Mazur, Lukas and Plessl, Christian and Kühne, Thomas}}, journal = {{Phys. Rev. Research}}, pages = {{033160}}, publisher = {{American Physical Society}}, title = {{{Parallel quantum chemistry on noisy intermediate-scale quantum computers}}}, doi = {{10.1103/PhysRevResearch.4.033160}}, volume = {{4}}, year = {{2022}}, } @article{50149, abstract = {{RNA editing processes are strikingly different in animals and plants. Up to thousands of specific cytidines are converted into uridines in plant chloroplasts and mitochondria whereas up to millions of adenosines are converted into inosines in animal nucleo-cytosolic RNAs. It is unknown whether these two different RNA editing machineries are mutually incompatible. RNA-binding pentatricopeptide repeat (PPR) proteins are the key factors of plant organelle cytidine-to-uridine RNA editing. The complete absence of PPR mediated editing of cytosolic RNAs might be due to a yet unknown barrier that prevents its activity in the cytosol. Here, we transferred two plant mitochondrial PPR-type editing factors into human cell lines to explore whether they could operate in the nucleo-cytosolic environment. PPR56 and PPR65 not only faithfully edited their native, co-transcribed targets but also different sets of off-targets in the human background transcriptome. More than 900 of such off-targets with editing efficiencies up to 91%, largely explained by known PPR-RNA binding properties, were identified for PPR56. Engineering two crucial amino acid positions in its PPR array led to predictable shifts in target recognition.
We conclude that plant PPR editing factors can operate in the entirely different genetic environment of the human nucleo-cytosol and can be intentionally re-engineered towards new targets.}}, author = {{Lesch, Elena and Schilling, Maximilian T and Brenner, Sarah and Yang, Yingying and Gruss, Oliver J and Knoop, Volker and Schallenberg-Rüdinger, Mareike}}, issn = {{0305-1048}}, journal = {{Nucleic Acids Research}}, keywords = {{Genetics}}, number = {{17}}, pages = {{9966--9983}}, publisher = {{Oxford University Press (OUP)}}, title = {{{Plant mitochondrial RNA editing factors can perform targeted C-to-U editing of nuclear transcripts in human cells}}}, doi = {{10.1093/nar/gkac752}}, volume = {{50}}, year = {{2022}}, } @unpublished{33493, abstract = {{Electronic structure calculations have been instrumental in providing many important insights into a range of physical and chemical properties of various molecular and solid-state systems. Their importance to various fields, including materials science, chemical sciences, computational chemistry and device physics, is underscored by the large fraction of available public supercomputing resources devoted to these calculations. As we enter the exascale era, exciting new opportunities to increase simulation numbers, sizes, and accuracies present themselves. In order to realize these promises, the community of electronic structure software developers will however first have to tackle a number of challenges pertaining to the efficient use of new architectures that will rely heavily on massive parallelism and hardware accelerators. This roadmap provides a broad overview of the state-of-the-art in electronic structure calculations and of the various new directions being pursued by the community. It covers 14 electronic structure codes, presenting their current status, their development priorities over the next five years, and their plans towards tackling the challenges and leveraging the opportunities presented by the advent of exascale computing.}}, author = {{Gavini, Vikram and Baroni, Stefano and Blum, Volker and Bowler, David R. and Buccheri, Alexander and Chelikowsky, James R. and Das, Sambit and Dawson, William and Delugas, Pietro and Dogan, Mehmet and Draxl, Claudia and Galli, Giulia and Genovese, Luigi and Giannozzi, Paolo and Giantomassi, Matteo and Gonze, Xavier and Govoni, Marco and Gulans, Andris and Gygi, François and Herbert, John M. and Kokott, Sebastian and Kühne, Thomas and Liou, Kai-Hsin and Miyazaki, Tsuyoshi and Motamarri, Phani and Nakata, Ayako and Pask, John E. and Plessl, Christian and Ratcliff, Laura E. and Richard, Ryan M. and Rossi, Mariana and Schade, Robert and Scheffler, Matthias and Schütt, Ole and Suryanarayana, Phanish and Torrent, Marc and Truflandier, Lionel and Windus, Theresa L. and Xu, Qimen and Yu, Victor W. -Z. and Perez, Danny}}, booktitle = {{arXiv:2209.12747}}, title = {{{Roadmap on Electronic Structure Codes in the Exascale Era}}}, year = {{2022}}, } @article{50146, abstract = {{Recent advances in numerical methods significantly pushed forward the understanding of electrons coupled to quantized lattice vibrations. At this stage, it becomes increasingly important to also account for the effects of physically inevitable environments. In particular, we study the transport properties of the Hubbard-Holstein Hamiltonian that models a large class of materials characterized by strong electron-phonon coupling, in contact with a dissipative environment. Even in the one-dimensional and isolated case, simulating the quantum dynamics of such a system with high accuracy is very challenging due to the infinite dimensionality of the phononic Hilbert spaces. For this reason, the effects of dissipation on the conductance properties of such systems have not been investigated systematically so far. We combine the non-Markovian hierarchy of pure states method and the Markovian quantum jumps method with the newly introduced projected purified density-matrix renormalization group, creating powerful tensor-network methods for dissipative quantum many-body systems. Investigating their numerical properties, we find a significant speedup up to a factor $\sim 30$ compared to conventional tensor-network techniques. We apply these methods to study dissipative quenches, aiming for an in-depth understanding of the formation, stability, and quasi-particle properties of bipolarons. Surprisingly, our results show that in the metallic phase dissipation localizes the bipolarons, which is reminiscent of an indirect quantum Zeno effect. However, the bipolaronic binding energy remains mainly unaffected, even in the presence of strong dissipation, exhibiting remarkable bipolaron stability.
These findings shed light on the problem of designing real materials exhibiting phonon-mediated high-$T_\mathrm{C}$ superconductivity.}}, author = {{Moroder, Mattia and Grundner, Martin and Damanet, François and Schollwöck, Ulrich and Mardazad, Sam and Flannigan, Stuart and Köhler, Thomas and Paeckel, Sebastian}}, journal = {{Physical Review B 107, 214310 (2023)}}, title = {{{Stable bipolarons in open quantum systems}}}, doi = {{10.1103/PhysRevB.107.214310}}, year = {{2022}}, } @article{50148, abstract = {{We develop a general decomposition of an ensemble of initial density profiles in terms of an average state and a basis of modes that represent the event-by-event fluctuations of the initial state. The basis is determined such that the probability distributions of the amplitudes of different modes are uncorrelated. Based on this decomposition, we quantify the different types and probabilities of event-by-event fluctuations in Glauber and Saturation models and investigate how the various modes affect different characteristics of the initial state. We perform simulations of the dynamical evolution with KoMPoST and MUSIC to investigate the impact of the modes on final-state observables and their correlations.}}, author = {{Borghini, Nicolas and Borrell, Marc and Feld, Nina and Roch, Hendrik and Schlichting, Sören and Werthmann, Clemens}}, journal = {{Phys. Rev. C 107 (2023) 034905}}, title = {{{Statistical analysis of initial state and final state response in heavy-ion collisions}}}, doi = {{10.1103/PhysRevC.107.034905}}, year = {{2022}}, } @article{33684, author = {{Schade, Robert and Kenter, Tobias and Elgabarty, Hossam and Lass, Michael and Schütt, Ole and Lazzaro, Alfio and Pabst, Hans and Mohr, Stephan and Hutter, Jürg and Kühne, Thomas and Plessl, Christian}}, issn = {{0167-8191}}, journal = {{Parallel Computing}}, keywords = {{Artificial Intelligence, Computer Graphics and Computer-Aided Design, Computer Networks and Communications, Hardware and Architecture, Theoretical Computer Science, Software}}, publisher = {{Elsevier BV}}, title = {{{Towards electronic structure-based ab-initio molecular dynamics simulations with hundreds of millions of atoms}}}, doi = {{10.1016/j.parco.2022.102920}}, volume = {{111}}, year = {{2022}}, } @inproceedings{46194, author = {{Kenter, Tobias and Shambhu, Adesh and Faghih-Naini, Sara and Aizinger, Vadym}}, booktitle = {{Proceedings of the Platform for Advanced Scientific Computing Conference}}, publisher = {{ACM}}, title = {{{Algorithm-hardware co-design of a discontinuous Galerkin shallow-water model for a dataflow architecture on FPGA}}}, doi = {{10.1145/3468267.3470617}}, year = {{2021}}, } @article{32240, abstract = {{
The effect of traces of ethanol in supercritical carbon dioxide on the mixture's thermodynamic properties is studied by molecular simulations and Taylor dispersion measurements.
 }}, author = {{Chatwell, René Spencer and Guevara-Carrion, Gabriela and Gaponenko, Yuri and Shevtsova, Valentina and Vrabec, Jadran}}, issn = {{1463-9076}}, journal = {{Physical Chemistry Chemical Physics}}, keywords = {{Physical and Theoretical Chemistry, General Physics and Astronomy}}, number = {{4}}, pages = {{3106--3115}}, publisher = {{Royal Society of Chemistry (RSC)}}, title = {{{Diffusion of the carbon dioxide–ethanol mixture in the extended critical region}}}, doi = {{10.1039/d0cp04985a}}, volume = {{23}}, year = {{2021}}, } @article{46122, author = {{Kaczmarek, Olaf and Mazur, Lukas and Sharma, Sayantan}}, issn = {{2470-0010}}, journal = {{Physical Review D}}, number = {{9}}, publisher = {{American Physical Society (APS)}}, title = {{{Eigenvalue spectra of QCD and the fate of UA(1) breaking towards the chiral limit}}}, doi = {{10.1103/physrevd.104.094518}}, volume = {{104}}, year = {{2021}}, } @inbook{29936, author = {{Ramaswami, Arjun and Kenter, Tobias and Kühne, Thomas and Plessl, Christian}}, booktitle = {{Applied Reconfigurable Computing. Architectures, Tools, and Applications}}, isbn = {{9783030790240}}, issn = {{0302-9743}}, publisher = {{Springer International Publishing}}, title = {{{Evaluating the Design Space for Offloading 3D FFT Calculations to an FPGA for High-Performance Computing}}}, doi = {{10.1007/978-3-030-79025-7_21}}, year = {{2021}}, } @inproceedings{20886, author = {{Nickchen, Tobias and Heindorf, Stefan and Engels, Gregor}}, booktitle = {{Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision}}, location = {{Hawaii}}, pages = {{1994--2002}}, title = {{{Generating Physically Sound Training Data for Image Recognition of Additively Manufactured Parts}}}, year = {{2021}}, } @article{46124, author = {{Altenkort, Luis and Eller, Alexander M. and Kaczmarek, O. and Mazur, Lukas and Moore, Guy D. and Shu, H.-T.}}, issn = {{2470-0010}}, journal = {{Physical Review D}}, number = {{1}}, publisher = {{American Physical Society (APS)}}, title = {{{Heavy quark momentum diffusion from the lattice using gradient flow}}}, doi = {{10.1103/physrevd.103.014511}}, volume = {{103}}, year = {{2021}}, } @inproceedings{46195, author = {{Karp, Martin and Podobas, Artur and Jansson, Niclas and Kenter, Tobias and Plessl, Christian and Schlatter, Philipp and Markidis, Stefano}}, booktitle = {{2021 IEEE International Parallel and Distributed Processing Symposium (IPDPS)}}, publisher = {{IEEE}}, title = {{{High-Performance Spectral Element Methods on Field-Programmable Gate Arrays : Implementation, Evaluation, and Future Projection}}}, doi = {{10.1109/ipdps49936.2021.00116}}, year = {{2021}}, } @inbook{21587, abstract = {{Solving partial differential equations on unstructured grids is a cornerstone of engineering and scientific computing. Nowadays, heterogeneous parallel platforms with CPUs, GPUs, and FPGAs enable energy-efficient and computationally demanding simulations. 
We developed the HighPerMeshes C++-embedded Domain-Specific Language (DSL) for bridging the abstraction gap between the mathematical and algorithmic formulation of mesh-based algorithms for PDE problems on the one hand and an increasing number of heterogeneous platforms with their different parallel programming and runtime models on the other hand. Thus, the HighPerMeshes DSL aims at higher productivity in the code development process for multiple target platforms. We introduce the concepts as well as the basic structure of the HighPerMeshes DSL, and demonstrate its usage with three examples, a Poisson and monodomain problem, respectively, solved by the continuous finite element method, and the discontinuous Galerkin method for Maxwell’s equation. The mapping of the abstract algorithmic description onto parallel hardware, including distributed memory compute clusters, is presented. Finally, the achievable performance and scalability are demonstrated for a typical example problem on a multi-core CPU cluster.}}, author = {{Alhaddad, Samer and Förstner, Jens and Groth, Stefan and Grünewald, Daniel and Grynko, Yevgen and Hannig, Frank and Kenter, Tobias and Pfreundt, Franz-Josef and Plessl, Christian and Schotte, Merlind and Steinke, Thomas and Teich, Jürgen and Weiser, Martin and Wende, Florian}}, booktitle = {{Euro-Par 2020: Parallel Processing Workshops}}, isbn = {{9783030715922}}, issn = {{0302-9743}}, keywords = {{tet_topic_hpc}}, title = {{{HighPerMeshes – A Domain-Specific Language for Numerical Algorithms on Unstructured Grids}}}, doi = {{10.1007/978-3-030-71593-9_15}}, year = {{2021}}, } @article{32243, abstract = {{Abstract The defining feature of active particles is that they constantly propel themselves by locally converting chemical energy into directed motion. This active self-propulsion prevents them from equilibrating with their thermal environment (e.g. an aqueous solution), thus keeping them permanently out of equilibrium. Nevertheless, the spatial dynamics of active particles might share certain equilibrium features, in particular in the steady state. We here focus on the time-reversal symmetry of individual spatial trajectories as a distinct equilibrium characteristic. We investigate to what extent the steady-state trajectories of a trapped active particle obey or break this time-reversal symmetry. Within the framework of active Ornstein–Uhlenbeck particles we find that the steady-state trajectories in a harmonic potential fulfill path-wise time-reversal symmetry exactly, while this symmetry is typically broken in anharmonic potentials.}}, author = {{Dabelow, Lennart and Bo, Stefano and Eichhorn, Ralf}}, issn = {{1742-5468}}, journal = {{Journal of Statistical Mechanics: Theory and Experiment}}, keywords = {{Statistics, Probability and Uncertainty, Statistics and Probability, Statistical and Nonlinear Physics}}, number = {{3}}, publisher = {{IOP Publishing}}, title = {{{How irreversible are steady-state trajectories of a trapped active particle?}}}, doi = {{10.1088/1742-5468/abe6fd}}, volume = {{2021}}, year = {{2021}}, } @unpublished{32245, abstract = {{Optical travelling wave antennas offer unique opportunities to control and selectively guide light into a specific direction which renders them as excellent candidates for optical communication and sensing. These applications require state of the art engineering to reach optimized functionalities such as high directivity and radiation efficiency, low side lobe level, broadband and tunable capabilities, and compact design. 
In this work we report on the numerical optimization of the directivity of optical travelling wave antennas made from low-loss dielectric materials using full-wave numerical simulations in conjunction with a particle swarm optimization algorithm. The antennas are composed of a reflector and a director deposited on a glass substrate and an emitter placed in the feed gap between them serves as an internal source of excitation. In particular, we analysed antennas with rectangular- and horn-shaped directors made of either Hafnium dioxide or Silicon. The optimized antennas produce highly directional emission due to the presence of two dominant guided TE modes in the director in addition to leaky modes. These guided modes dominate the far-field emission pattern and govern the direction of the main lobe emission which predominately originates from the end facet of the director. Our work also provides a comprehensive analysis of the modes, radiation patterns, parametric influences, and bandwidths of the antennas that highlights their robust nature.}}, author = {{Farheen, Henna and Leuteritz, Till and Linden, Stefan and Myroshnychenko, Viktor and Förstner, Jens}}, booktitle = {{arXiv:2106.02468}}, title = {{{Optimization of optical waveguide antennas for directive emission of light}}}, year = {{2021}}, } @article{46123, author = {{Altenkort, Luis and Eller, Alexander M. and Kaczmarek, O. and Mazur, Lukas and Moore, Guy D. and Shu, H.-T.}}, issn = {{2470-0010}}, journal = {{Physical Review D}}, number = {{11}}, publisher = {{American Physical Society (APS)}}, title = {{{Sphaleron rate from Euclidean lattice correlators: An exploration}}}, doi = {{10.1103/physrevd.103.114513}}, volume = {{103}}, year = {{2021}}, } @unpublished{32236, abstract = {{The interaction between quantum light and matter is being intensively studied for systems that are enclosed in high-$Q$ cavities which strongly enhance the light-matter coupling. However, for many applications, cavities with lower $Q$-factors are preferred due to the increased spectral width of the cavity mode. Here, we investigate the interaction between quantum light and matter represented by a $\Lambda$-type three-level system in lossy cavities, assuming that cavity losses are the dominant loss mechanism. We demonstrate that cavity losses lead to non-trivial steady states of the electronic occupations that can be controlled by the loss rate and the initial statistics of the quantum fields. The mechanism of formation of such steady states can be understood on the basis of the equations of motion. Analytical expressions for steady states and their numerical simulations are presented and discussed.}}, author = {{Rose, H. and Tikhonova, O. V. and Meier, T. and Sharapova, P. 
}}, booktitle = {{arXiv:2109.00842}}, title = {{{Steady states of $Λ$-type three-level systems excited by quantum light in lossy cavities}}}, year = {{2021}}, } @article{24788, author = {{Alhaddad, Samer and Förstner, Jens and Groth, Stefan and Grünewald, Daniel and Grynko, Yevgen and Hannig, Frank and Kenter, Tobias and Pfreundt, Franz‐Josef and Plessl, Christian and Schotte, Merlind and Steinke, Thomas and Teich, Jürgen and Weiser, Martin and Wende, Florian}}, issn = {{1532-0626}}, journal = {{Concurrency and Computation: Practice and Experience}}, keywords = {{tet_topic_hpc}}, pages = {{e6616}}, title = {{{The HighPerMeshes framework for numerical algorithms on unstructured grids}}}, doi = {{10.1002/cpe.6616}}, year = {{2021}}, } @article{28099, abstract = {{N-body methods are one of the essential algorithmic building blocks of high-performance and parallel computing. Previous research has shown promising performance for implementing n-body simulations with pairwise force calculations on FPGAs. However, to avoid challenges with accumulation and memory access patterns, the presented designs calculate each pair of forces twice, along with both force sums of the involved particles. Also, they require large problem instances with hundreds of thousands of particles to reach their respective peak performance, limiting the applicability for strong scaling scenarios. This work addresses both issues by presenting a novel FPGA design that uses each calculated force twice and overlaps data transfers and computations in a way that allows to reach peak performance even for small problem instances, outperforming previous single precision results even in double precision, and scaling linearly over multiple interconnected FPGAs. For a comparison across architectures, we provide an equally optimized CPU reference, which for large problems actually achieves higher peak performance per device, however, given the strong scaling advantages of the FPGA design, in parallel setups with few thousand particles per device, the FPGA platform achieves highest performance and power efficiency.}}, author = {{Menzel, Johannes and Plessl, Christian and Kenter, Tobias}}, issn = {{1936-7406}}, journal = {{ACM Transactions on Reconfigurable Technology and Systems}}, number = {{1}}, pages = {{1--30}}, title = {{{The Strong Scaling Advantage of FPGAs in HPC for N-body Simulations}}}, doi = {{10.1145/3491235}}, volume = {{15}}, year = {{2021}}, } @unpublished{32244, abstract = {{We push the boundaries of electronic structure-based \textit{ab-initio} molecular dynamics (AIMD) beyond 100 million atoms. This scale is otherwise barely reachable with classical force-field methods or novel neural network and machine learning potentials. We achieve this breakthrough by combining innovations in linear-scaling AIMD, efficient and approximate sparse linear algebra, low and mixed-precision floating-point computation on GPUs, and a compensation scheme for the errors introduced by numerical approximations. The core of our work is the non-orthogonalized local submatrix method (NOLSM), which scales very favorably to massively parallel computing systems and translates large sparse matrix operations into highly parallel, dense matrix operations that are ideally suited to hardware accelerators. 
We demonstrate that the NOLSM method, which is at the center point of each AIMD step, is able to achieve a sustained performance of 324 PFLOP/s in mixed FP16/FP32 precision corresponding to an efficiency of 67.7% when running on 1536 NVIDIA A100 GPUs.}}, author = {{Schade, Robert and Kenter, Tobias and Elgabarty, Hossam and Lass, Michael and Schütt, Ole and Lazzaro, Alfio and Pabst, Hans and Mohr, Stephan and Hutter, Jürg and Kühne, Thomas D. and Plessl, Christian}}, booktitle = {{arXiv:2104.08245}}, title = {{{Towards Electronic Structure-Based Ab-Initio Molecular Dynamics Simulations with Hundreds of Millions of Atoms}}}, year = {{2021}}, } @inproceedings{27365, author = {{Meyer, Marius}}, booktitle = {{Proceedings of the 11th International Symposium on Highly Efficient Accelerators and Reconfigurable Technologies}}, title = {{{Towards Performance Characterization of FPGAs in Context of HPC using OpenCL Benchmarks}}}, doi = {{10.1145/3468044.3468058}}, year = {{2021}}, } @article{32246, abstract = {{
State-of-the-art methods in materials science such as artificial intelligence and data-driven techniques advance the investigation of photovoltaic materials.
}}, author = {{Mirhosseini, Hossein and Kormath Madam Raghupathy, Ramya and Sahoo, Sudhir K. and Wiebeler, Hendrik and Chugh, Manjusha and Kühne, Thomas D.}}, issn = {{1463-9076}}, journal = {{Physical Chemistry Chemical Physics}}, keywords = {{Physical and Theoretical Chemistry, General Physics and Astronomy}}, number = {{46}}, pages = {{26682--26701}}, publisher = {{Royal Society of Chemistry (RSC)}}, title = {{{In silico investigation of Cu(In,Ga)Se2-based solar cells}}}, doi = {{10.1039/d0cp04712k}}, volume = {{22}}, year = {{2020}}, } @inproceedings{16898, abstract = {{Electronic structure calculations based on density-functional theory (DFT) represent a significant part of today's HPC workloads and pose high demands on high-performance computing resources. To perform these quantum-mechanical DFT calculations on complex large-scale systems, so-called linear scaling methods instead of conventional cubic scaling methods are required. In this work, we take up the idea of the submatrix method and apply it to the DFT computations in the software package CP2K. For that purpose, we transform the underlying numeric operations on distributed, large, sparse matrices into computations on local, much smaller and nearly dense matrices. This allows us to exploit the full floating-point performance of modern CPUs and to make use of dedicated accelerator hardware, where performance has been limited by memory bandwidth before. We demonstrate both functionality and performance of our implementation and show how it can be accelerated with GPUs and FPGAs.}}, author = {{Lass, Michael and Schade, Robert and Kühne, Thomas and Plessl, Christian}}, booktitle = {{Proc. International Conference for High Performance Computing, Networking, Storage and Analysis (SC)}}, location = {{Atlanta, GA, US}}, pages = {{1127--1140}}, publisher = {{IEEE Computer Society}}, title = {{{A Submatrix-Based Method for Approximate Matrix Function Evaluation in the Quantum Chemistry Code CP2K}}}, doi = {{10.1109/SC41405.2020.00084}}, year = {{2020}}, } @article{12878, abstract = {{In scientific computing, the acceleration of atomistic computer simulations by means of custom hardware is finding ever-growing application. A major limitation, however, is that the high efficiency in terms of performance and low power consumption entails the massive usage of low precision computing units. Here, based on the approximate computing paradigm, we present an algorithmic method to compensate for numerical inaccuracies due to low accuracy arithmetic operations rigorously, yet still obtaining exact expectation values using a properly modified Langevin-type equation.}}, author = {{Rengaraj, Varadarajan and Lass, Michael and Plessl, Christian and Kühne, Thomas}}, journal = {{Computation}}, number = {{2}}, publisher = {{MDPI}}, title = {{{Accurate Sampling with Noisy Forces from Approximate Computing}}}, doi = {{10.3390/computation8020039}}, volume = {{8}}, year = {{2020}}, } @article{16277, abstract = {{CP2K is an open source electronic structure and molecular dynamics software package to perform atomistic simulations of solid-state, liquid, molecular, and biological systems. It is especially aimed at massively parallel and linear-scaling electronic structure methods and state-of-theart ab initio molecular dynamics simulations. Excellent performance for electronic structure calculations is achieved using novel algorithms implemented for modern high-performance computing systems. 
This review revisits the main capabilities of CP2K to perform efficient and accurate electronic structure simulations. The emphasis is put on density functional theory and multiple post–Hartree–Fock methods using the Gaussian and plane wave approach and its augmented all-electron extension.}}, author = {{Kühne, Thomas and Iannuzzi, Marcella and Ben, Mauro Del and Rybkin, Vladimir V. and Seewald, Patrick and Stein, Frederick and Laino, Teodoro and Khaliullin, Rustam Z. and Schütt, Ole and Schiffmann, Florian and Golze, Dorothea and Wilhelm, Jan and Chulkov, Sergey and Bani-Hashemian, Mohammad Hossein and Weber, Valéry and Borstnik, Urban and Taillefumier, Mathieu and Jakobovits, Alice Shoshana and Lazzaro, Alfio and Pabst, Hans and Müller, Tiziano and Schade, Robert and Guidon, Manuel and Andermatt, Samuel and Holmberg, Nico and Schenter, Gregory K. and Hehn, Anna and Bussy, Augustin and Belleflamme, Fabian and Tabacchi, Gloria and Glöß, Andreas and Lass, Michael and Bethune, Iain and Mundy, Christopher J. and Plessl, Christian and Watkins, Matt and VandeVondele, Joost and Krack, Matthias and Hutter, Jürg}}, journal = {{The Journal of Chemical Physics}}, number = {{19}}, title = {{{CP2K: An electronic structure and molecular dynamics software package - Quickstep: Efficient and accurate electronic structure calculations}}}, doi = {{10.1063/5.0007045}}, volume = {{152}}, year = {{2020}}, } @inproceedings{21632, abstract = {{FPGAs have found increasing adoption in data center applications since a new generation of high-level tools have become available which noticeably reduce development time for FPGA accelerators and still provide high-quality results. There is, however, no high-level benchmark suite available, which specifically enables a comparison of FPGA architectures, programming tools, and libraries for HPC applications. To fill this gap, we have developed an OpenCL-based open-source implementation of the HPCC benchmark suite for Xilinx and Intel FPGAs. This benchmark can serve to analyze the current capabilities of FPGA devices, cards, and development tool flows, track progress over time, and point out specific difficulties for FPGA acceleration in the HPC domain. Additionally, the benchmark documents proven performance optimization patterns. 
We will continue optimizing and porting the benchmark for new generations of FPGAs and design tools and encourage active participation to create a valuable tool for the community.}}, author = {{Meyer, Marius and Kenter, Tobias and Plessl, Christian}}, booktitle = {{2020 IEEE/ACM International Workshop on Heterogeneous High-performance Reconfigurable Computing (H2RC)}}, isbn = {{9781665415927}}, keywords = {{FPGA, OpenCL, High Level Synthesis, HPC benchmarking}}, title = {{{Evaluating FPGA Accelerator Performance with a Parameterized OpenCL Adaptation of Selected Benchmarks of the HPCChallenge Benchmark Suite}}}, doi = {{10.1109/h2rc51942.2020.00007}}, year = {{2020}}, } @unpublished{32242, abstract = {{We consider a resource-aware variant of the classical multi-armed bandit problem: In each round, the learner selects an arm and determines a resource limit. It then observes a corresponding (random) reward, provided the (random) amount of consumed resources remains below the limit. Otherwise, the observation is censored, i.e., no reward is obtained. For this problem setting, we introduce a measure of regret, which incorporates the actual amount of allocated resources of each learning round as well as the optimality of realizable rewards. Thus, to minimize regret, the learner needs to set a resource limit and choose an arm in such a way that the chance to realize a high reward within the predefined resource limit is high, while the resource limit itself should be kept as low as possible. We derive the theoretical lower bound on the cumulative regret and propose a learning algorithm having a regret upper bound that matches the lower bound. In a simulation study, we show that our learning algorithm outperforms straightforward extensions of standard multi-armed bandit algorithms.}}, author = {{Bengs, Viktor and Hüllermeier, Eyke}}, booktitle = {{arXiv:2011.00813}}, title = {{{Multi-Armed Bandits with Censored Consumption of Resources}}}, year = {{2020}}, } @inbook{18789, author = {{Nickchen, Tobias and Engels, Gregor and Lohn, Johannes}}, booktitle = {{Industrializing Additive Manufacturing}}, isbn = {{9783030543334}}, title = {{{Opportunities of 3D Machine Learning for Manufacturability Analysis and Component Recognition in the Additive Manufacturing Process Chain}}}, doi = {{10.1007/978-3-030-54334-1_4}}, year = {{2020}}, } @article{21, abstract = {{We address the general mathematical problem of computing the inverse p-th root of a given matrix in an efficient way. A new method to construct iteration functions that allow calculating arbitrary p-th roots and their inverses of symmetric positive definite matrices is presented. We show that the order of convergence is at least quadratic and that adaptively adjusting a parameter q always leads to an even faster convergence. In this way, a better performance than with previously known iteration schemes is achieved. 
The efficiency of the iterative functions is demonstrated for various matrices with different densities, condition numbers and spectral radii.}}, author = {{Richters, Dorothee and Lass, Michael and Walther, Andrea and Plessl, Christian and Kühne, Thomas}}, journal = {{Communications in Computational Physics}}, number = {{2}}, pages = {{564--585}}, publisher = {{Global Science Press}}, title = {{{A General Algorithm to Calculate the Inverse Principal p-th Root of Symmetric Positive Definite Matrices}}}, doi = {{10.4208/cicp.OA-2018-0053}}, volume = {{25}}, year = {{2019}}, } @phdthesis{34167, author = {{Riebler, Heinrich}}, title = {{{Efficient parallel branch-and-bound search on FPGAs using work stealing and instance-specific designs}}}, doi = {{10.17619/UNIPB/1-830}}, year = {{2019}}, } @article{12871, author = {{Platzner, Marco and Plessl, Christian}}, issn = {{0170-6012}}, journal = {{Informatik Spektrum}}, title = {{{FPGAs im Rechenzentrum}}}, doi = {{10.1007/s00287-019-01187-w}}, year = {{2019}}, } @inproceedings{15478, abstract = {{Stratix 10 FPGA cards have a good potential for the acceleration of HPC workloads since the Stratix 10 product line introduces devices with a large number of DSP and memory blocks. The high level synthesis of OpenCL codes can play a fundamental role for FPGAs in HPC, because it allows to implement different designs with lower development effort compared to hand optimized HDL. However, Stratix 10 cards are still hard to fully exploit using the Intel FPGA SDK for OpenCL. The implementation of designs with thousands of concurrent arithmetic operations often suffers from place and route problems that limit the maximum frequency or entirely prevent a successful synthesis. In order to overcome these issues for the implementation of the matrix multiplication, we formulate Cannon's matrix multiplication algorithm with regard to its efficient synthesis within the FPGA logic. We obtain a two-level block algorithm, where the lower level sub-matrices are multiplied using our Cannon's algorithm implementation. Following this design approach with multiple compute units, we are able to get maximum frequencies close to and above 300 MHz with high utilization of DSP and memory blocks. This allows for performance results above 1 TeraFLOPS.}}, author = {{Gorlani, Paolo and Kenter, Tobias and Plessl, Christian}}, booktitle = {{Proceedings of the International Conference on Field-Programmable Technology (FPT)}}, publisher = {{IEEE}}, title = {{{OpenCL Implementation of Cannon's Matrix Multiplication Algorithm on Intel Stratix 10 FPGAs}}}, doi = {{10.1109/ICFPT47387.2019.00020}}, year = {{2019}}, } @article{7689, author = {{Riebler, Heinrich and Vaz, Gavin Francis and Kenter, Tobias and Plessl, Christian}}, journal = {{ACM Trans. Archit. Code Optim. (TACO)}}, keywords = {{htrop}}, number = {{2}}, pages = {{14:1–14:26}}, publisher = {{ACM}}, title = {{{Transparent Acceleration for Heterogeneous Platforms with Compilation to OpenCL}}}, doi = {{10.1145/3319423}}, volume = {{16}}, year = {{2019}}, } @inproceedings{22, abstract = {{This paper describes a data structure and a heuristic to plan and map arbitrary resources in complex combinations while applying time dependent constraints. The approach is used in the planning based workload manager OpenCCS at the Paderborn Center for Parallel Computing (PC\(^2\)) to operate heterogeneous clusters with up to 10000 cores. 
We also show performance results derived from four years of operation.}}, author = {{Keller, Axel}}, booktitle = {{Proc. Workshop on Job Scheduling Strategies for Parallel Processing (JSSPP)}}, editor = {{Klusáček, D. and Cirne, W. and Desai, N.}}, isbn = {{978-3-319-77398-8}}, keywords = {{Scheduling Planning Mapping Workload management}}, location = {{Orlando, FL, USA}}, pages = {{132--151}}, publisher = {{Springer}}, title = {{{A Data Structure for Planning Based Workload Management of Heterogeneous HPC Systems}}}, doi = {{10.1007/978-3-319-77398-8_8}}, volume = {{10773}}, year = {{2018}}, } @inproceedings{1590, abstract = {{We present the submatrix method, a highly parallelizable method for the approximate calculation of inverse p-th roots of large sparse symmetric matrices which are required in different scientific applications. Following the idea of Approximate Computing, we allow imprecision in the final result in order to utilize the sparsity of the input matrix and to allow massively parallel execution. For an n x n matrix, the proposed algorithm allows to distribute the calculations over n nodes with only little communication overhead. The result matrix exhibits the same sparsity pattern as the input matrix, allowing for efficient reuse of allocated data structures. We evaluate the algorithm with respect to the error that it introduces into calculated results, as well as its performance and scalability. We demonstrate that the error is relatively limited for well-conditioned matrices and that results are still valuable for error-resilient applications like preconditioning even for ill-conditioned matrices. We discuss the execution time and scaling of the algorithm on a theoretical level and present a distributed implementation of the algorithm using MPI and OpenMP. We demonstrate the scalability of this implementation by running it on a high-performance compute cluster comprised of 1024 CPU cores, showing a speedup of 665x compared to single-threaded execution.}}, author = {{Lass, Michael and Mohr, Stephan and Wiebeler, Hendrik and Kühne, Thomas and Plessl, Christian}}, booktitle = {{Proc. Platform for Advanced Scientific Computing (PASC) Conference}}, isbn = {{978-1-4503-5891-0/18/07}}, keywords = {{approximate computing, linear algebra, matrix inversion, matrix p-th roots, numeric algorithm, parallel computing}}, location = {{Basel, Switzerland}}, publisher = {{ACM}}, title = {{{A Massively Parallel Algorithm for the Approximate Calculation of Inverse p-th Roots of Large Sparse Matrices}}}, doi = {{10.1145/3218176.3218231}}, year = {{2018}}, } @inproceedings{1204, author = {{Riebler, Heinrich and Vaz, Gavin Francis and Kenter, Tobias and Plessl, Christian}}, booktitle = {{Proc. ACM SIGPLAN Symposium on Principles and Practice of Parallel Programming (PPoPP)}}, isbn = {{9781450349826}}, keywords = {{htrop}}, publisher = {{ACM}}, title = {{{Automated Code Acceleration Targeting Heterogeneous OpenCL Devices}}}, doi = {{10.1145/3178487.3178534}}, year = {{2018}}, } @inproceedings{1588, abstract = {{The exploration of FPGAs as accelerators for scientific simulations has so far mostly been focused on small kernels of methods working on regular data structures, for example in the form of stencil computations for finite difference methods. In computational sciences, often more advanced methods are employed that promise better stability, convergence, locality and scaling. 
Unstructured meshes are shown to be more effective and more accurate, compared to regular grids, in representing computation domains of various shapes. Using unstructured meshes, the discontinuous Galerkin method preserves the ability to perform explicit local update operations for simulations in the time domain. In this work, we investigate FPGAs as target platform for an implementation of the nodal discontinuous Galerkin method to find time-domain solutions of Maxwell's equations in an unstructured mesh. When maximizing data reuse and fitting constant coefficients into suitably partitioned on-chip memory, high computational intensity allows us to implement and feed wide data paths with hundreds of floating point operators. By decoupling off-chip memory accesses from the computations, high memory bandwidth can be sustained, even for the irregular access pattern required by parts of the application. Using the Intel/Altera OpenCL SDK for FPGAs, we present different implementation variants for different polynomial orders of the method. In different phases of the algorithm, either computational or bandwidth limits of the Arria 10 platform are almost reached, thus outperforming a highly multithreaded CPU implementation by around 2x.}}, author = {{Kenter, Tobias and Mahale, Gopinath and Alhaddad, Samer and Grynko, Yevgen and Schmitt, Christian and Afzal, Ayesha and Hannig, Frank and Förstner, Jens and Plessl, Christian}}, booktitle = {{Proc. Int. Symp. on Field-Programmable Custom Computing Machines (FCCM)}}, keywords = {{tet_topic_hpc}}, publisher = {{IEEE}}, title = {{{OpenCL-based FPGA Design to Accelerate the Nodal Discontinuous Galerkin Method for Unstructured Meshes}}}, doi = {{10.1109/FCCM.2018.00037}}, year = {{2018}}, } @article{6516, author = {{Mertens, Jan Cedric and Boschmann, Alexander and Schmidt, M. and Plessl, Christian}}, issn = {{1369-7072}}, journal = {{Sports Engineering}}, number = {{4}}, pages = {{441--451}}, publisher = {{Springer Nature}}, title = {{{Sprint diagnostic with GPS and inertial sensor fusion}}}, doi = {{10.1007/s12283-018-0291-0}}, volume = {{21}}, year = {{2018}}, } @article{13348, author = {{Luk, Samuel M. H. and Lewandowski, P. and Kwong, N. H. and Baudin, E. and Lafont, O. and Tignon, J. and Leung, P. T. and Chan, Ch. K. P. and Babilon, M. and Schumacher, Stefan and Binder, R.}}, issn = {{0740-3224}}, journal = {{Journal of the Optical Society of America B}}, number = {{1}}, title = {{{Theory of optically controlled anisotropic polariton transport in semiconductor double microcavities}}}, doi = {{10.1364/josab.35.000146}}, volume = {{35}}, year = {{2018}}, } @article{20, abstract = {{Approximate computing has shown to provide new ways to improve performance and power consumption of error-resilient applications. While many of these applications can be found in image processing, data classification or machine learning, we demonstrate its suitability to a problem from scientific computing. Utilizing the self-correcting behavior of iterative algorithms, we show that approximate computing can be applied to the calculation of inverse matrix p-th roots which are required in many applications in scientific computing. 
Results show great opportunities to reduce the computational effort and bandwidth required for the execution of the discussed algorithm, especially when targeting special accelerator hardware.}}, author = {{Lass, Michael and Kühne, Thomas and Plessl, Christian}}, issn = {{1943-0671}}, journal = {{Embedded Systems Letters}}, number = {{2}}, pages = {{ 33--36}}, publisher = {{IEEE}}, title = {{{Using Approximate Computing for the Calculation of Inverse Matrix p-th Roots}}}, doi = {{10.1109/LES.2017.2760923}}, volume = {{10}}, year = {{2018}}, } @article{18, abstract = {{Branch and bound (B&B) algorithms structure the search space as a tree and eliminate infeasible solutions early by pruning subtrees that cannot lead to a valid or optimal solution. Custom hardware designs significantly accelerate the execution of these algorithms. In this article, we demonstrate a high-performance B&B implementation on FPGAs. First, we identify general elements of B&B algorithms and describe their implementation as a finite state machine. Then, we introduce workers that autonomously cooperate using work stealing to allow parallel execution and full utilization of the target FPGA. Finally, we explore advantages of instance-specific designs that target a specific problem instance to improve performance. We evaluate our concepts by applying them to a branch and bound problem, the reconstruction of corrupted AES keys obtained from cold-boot attacks. The evaluation shows that our work stealing approach is scalable with the available resources and provides speedups proportional to the number of workers. Instance-specific designs allow us to achieve an overall speedup of 47 × compared to the fastest implementation of AES key reconstruction so far. Finally, we demonstrate how instance-specific designs can be generated just-in-time such that the provided speedups outweigh the additional time required for design synthesis.}}, author = {{Riebler, Heinrich and Lass, Michael and Mittendorf, Robert and Löcke, Thomas and Plessl, Christian}}, issn = {{1936-7406}}, journal = {{ACM Transactions on Reconfigurable Technology and Systems (TRETS)}}, keywords = {{coldboot}}, number = {{3}}, pages = {{24:1--24:23}}, publisher = {{Association for Computing Machinery (ACM)}}, title = {{{Efficient Branch and Bound on FPGAs Using Work Stealing and Instance-Specific Designs}}}, doi = {{10.1145/3053687}}, volume = {{10}}, year = {{2017}}, } @inproceedings{1592, abstract = {{Compared to classical HDL designs, generating FPGA with high-level synthesis from an OpenCL specification promises easier exploration of different design alternatives and, through ready-to-use infrastructure and common abstractions for host and memory interfaces, easier portability between different FPGA families. In this work, we evaluate the extent of this promise. To this end, we present a parameterized FDTD implementation for photonic microcavity simulations. Our design can trade-off different forms of parallelism and works for two independent OpenCL-based FPGA design flows. Hence, we can target FPGAs from different vendors and different FPGA families. We describe how we used pre-processor macros to achieve this flexibility and to work around different shortcomings of the current tools. Choosing the right design configurations, we are able to present two extremely competitive solutions for very different FPGA targets, reaching up to 172 GFLOPS sustained performance. 
With the portability and flexibility demonstrated, code developers not only avoid vendor lock-in, but can even make best use of real trade-offs between different architectures.}}, author = {{Kenter, Tobias and Förstner, Jens and Plessl, Christian}}, booktitle = {{Proc. Int. Conf. on Field Programmable Logic and Applications (FPL)}}, keywords = {{tet_topic_hpc}}, publisher = {{IEEE}}, title = {{{Flexible FPGA design for FDTD using OpenCL}}}, doi = {{10.23919/FPL.2017.8056844}}, year = {{2017}}, } @article{1589, author = {{Schumacher, Jörn and Plessl, Christian and Vandelli, Wainer}}, journal = {{Journal of Physics: Conference Series}}, publisher = {{IOP Publishing}}, title = {{{High-Throughput and Low-Latency Network Communication with NetIO}}}, doi = {{10.1088/1742-6596/898/8/082003}}, volume = {{898}}, year = {{2017}}, } @inproceedings{19, abstract = {{Version Control Systems (VCS) are a valuable tool for software development and document management. Both client/server and distributed (Peer-to-Peer) models exist, with the latter (e.g., Git and Mercurial) becoming increasingly popular. Their distributed nature introduces complications, especially concerning security: it is hard to control the dissemination of contents stored in distributed VCS as they rely on replication of complete repositories to any involved user. We overcome this issue by designing and implementing a concept for cryptography-enforced access control which is transparent to the user. Use of field-tested schemes (end-to-end encryption, digital signatures) allows for strong security, while adoption of convergent encryption and content-defined chunking retains storage efficiency. The concept is seamlessly integrated into Mercurial---respecting its distributed storage concept---to ensure practical usability and compatibility to existing deployments.}}, author = {{Lass, Michael and Leibenger, Dominik and Sorge, Christoph}}, booktitle = {{Proc. 41st Conference on Local Computer Networks (LCN)}}, isbn = {{978-1-5090-2054-6}}, keywords = {{access control, distributed version control systems, mercurial, peer-to-peer, convergent encryption, confidentiality, authenticity}}, publisher = {{IEEE}}, title = {{{Confidentiality and Authenticity for Distributed Version Control Systems - A Mercurial Extension}}}, doi = {{10.1109/lcn.2016.11}}, year = {{2016}}, } @inproceedings{24, author = {{Kenter, Tobias and Plessl, Christian}}, booktitle = {{Proc. Workshop on Heterogeneous High-performance Reconfigurable Computing (H2RC)}}, title = {{{Microdisk Cavity FDTD Simulation on FPGA using OpenCL}}}, year = {{2016}}, } @phdthesis{33, abstract = {{Lightweight materials play an ever growing role in today's world. Saving on the mass of a machine will usually translate into a lower energy consumption. However, lightweight applications are prone to develop performance problems due to vibration induced by the operation of the machine. The Fraunhofer Institute for Manufacturing Technology and Advanced Materials in Dresden conducts research into the damping properties of composite materials. They are experimenting with hollow, particle filled spheres embedded in the lightweight material. Such a system is the technical motivation of this thesis. Ultimately, a numerical experiment to derive the coefficient of restitution is required. The simulation developed in this thesis is based on a discrete element method to track the individual particle and sphere trajectories. 
Based on a potential based approach for the particle interactions deployed in molecular dynamics, the behavior of the particles can be controlled effectively. The simulated volume uses reflecting boundaries and encloses the hollow sphere. In this work, a highly flexible memory structure was used with a linked cell approach to cope with the highly flexible mass of particles. This allows for a linear complexity of the method in regard to the particle number by reducing the computational overhead of the interaction computation. Multiple numerical experiments show the great effect the particles have on the damping behavior of the system.}}, author = {{Steinle, Tobias}}, title = {{{Modeling and simulation of metallic, particle-damped spheres for lightweight materials}}}, year = {{2016}}, } @inproceedings{34, author = {{Dellnitz, Michael and Eckstein, Julian and Flaßkamp, Kathrin and Friedel, Patrick and Horenkamp, Christian and Köhler, Ulrich and Ober-Blöbaum, Sina and Peitz, Sebastian and Tiemeyer, Sebastian}}, booktitle = {{Progress in Industrial Mathematics at ECMI}}, issn = {{2212-0173}}, pages = {{633--641}}, publisher = {{Springer International Publishing}}, title = {{{Multiobjective Optimal Control Methods for the Development of an Intelligent Cruise Control}}}, doi = {{10.1007/978-3-319-23413-7_87}}, volume = {{22}}, year = {{2016}}, } @inproceedings{171, author = {{Kenter, Tobias and Vaz, Gavin Francis and Riebler, Heinrich and Plessl, Christian}}, booktitle = {{Workshop on Reconfigurable Computing (WRC)}}, title = {{{Opportunities for deferring application partitioning and accelerator synthesis to runtime (extended abstract)}}}, year = {{2016}}, } @inproceedings{168, abstract = {{The use of heterogeneous computing resources, such as Graphic Processing Units or other specialized coprocessors, has become widespread in recent years because of their performance and energy efficiency advantages. Approaches for managing and scheduling tasks to heterogeneous resources are still subject to research. Although queuing systems have recently been extended to support accelerator resources, a general solution that manages heterogeneous resources at the operating system-level to exploit a global view of the system state is still missing. In this paper we present a user space scheduler that enables task scheduling and migration on heterogeneous processing resources in Linux. Using run queues for available resources we perform scheduling decisions based on the system state and on task characterization from earlier measurements. With a programming pattern that supports the integration of checkpoints into applications, we preempt tasks and migrate them between three very different compute resources. Considering static and dynamic workload scenarios, we show that this approach can gain up to 17% performance, on average 7%, by effectively avoiding idle resources. 
We demonstrate that a work-conserving strategy without migration is not a suitable alternative.}}, author = {{Lösch, Achim and Beisel, Tobias and Kenter, Tobias and Plessl, Christian and Platzner, Marco}}, booktitle = {{Proceedings of the 2016 Design, Automation & Test in Europe Conference & Exhibition (DATE)}}, pages = {{912--917}}, publisher = {{EDA Consortium / IEEE}}, title = {{{Performance-centric scheduling with task migration for a heterogeneous compute node in the data center}}}, year = {{2016}}, } @article{165, abstract = {{A broad spectrum of applications can be accelerated by offloading computation intensive parts to reconfigurable hardware. However, to achieve speedups, the number of loop iterations (trip count) needs to be sufficiently large to amortize offloading overheads. Trip counts are frequently not known at compile time, but only at runtime just before entering a loop. Therefore, we propose to generate code for both the CPU and the coprocessor, and defer the offloading decision to the application runtime. We demonstrate how a toolflow, based on the LLVM compiler framework, can automatically embed dynamic offloading decisions into the application code. We perform in-depth static and dynamic analysis of popular benchmarks, which confirm the general potential of such an approach. We also propose to optimize the offloading process by decoupling the runtime decision from the loop execution (decision slack). The feasibility of our approach is demonstrated by a toolflow that automatically identifies suitable data-parallel loops and generates code for the FPGA coprocessor of a Convey HC-1. We evaluate the integrated toolflow with representative loops executed for different input data sizes.}}, author = {{Vaz, Gavin Francis and Riebler, Heinrich and Kenter, Tobias and Plessl, Christian}}, issn = {{0045-7906}}, journal = {{Computers and Electrical Engineering}}, pages = {{91--111}}, publisher = {{Elsevier}}, title = {{{Potential and Methods for Embedding Dynamic Offloading Decisions into Application Code}}}, doi = {{10.1016/j.compeleceng.2016.04.021}}, volume = {{55}}, year = {{2016}}, } @phdthesis{161, author = {{Kenter, Tobias}}, publisher = {{Universität Paderborn}}, title = {{{Reconfigurable Accelerators in the World of General-Purpose Computing}}}, year = {{2016}}, } @inbook{29, abstract = {{In this chapter, we present an introduction to the ReconOS operating system for reconfigurable computing. ReconOS offers a unified multi-threaded programming model and operating system services for threads executing in software and threads mapped to reconfigurable hardware. By supporting standard POSIX operating system functions for both software and hardware threads, ReconOS particularly caters to developers with a software background, because developers can use well-known mechanisms such as semaphores, mutexes, condition variables, and message queues for developing hybrid applications with threads running on the CPU and FPGA concurrently. 
Through the semantic integration of hardware accelerators into a standard operating system environment, ReconOS allows for rapid design space exploration, supports a structured application development process and improves the portability of applications between different reconfigurable computing systems.}}, author = {{Agne, Andreas and Platzner, Marco and Plessl, Christian and Happe, Markus and Lübbers, Enno}}, booktitle = {{FPGAs for Software Programmers}}, editor = {{Koch, Dirk and Hannig, Frank and Ziener, Daniel}}, isbn = {{978-3-319-26406-6}}, pages = {{227--244}}, publisher = {{Springer International Publishing}}, title = {{{ReconOS}}}, doi = {{10.1007/978-3-319-26408-0_13}}, year = {{2016}}, } @inbook{156, abstract = {{Many modern compute nodes are heterogeneous multi-cores that integrate several CPU cores with fixed function or reconfigurable hardware cores. Such systems need to adapt task scheduling and mapping to optimise for performance and energy under varying workloads and, increasingly important, for thermal and fault management and are thus relevant targets for self-aware computing. In this chapter, we take up the generic reference architecture for designing self-aware and self-expressive computing systems and refine it for heterogeneous multi-cores. We present ReconOS, an architecture, programming model and execution environment for heterogeneous multi-cores, and show how the components of the reference architecture can be implemented on top of ReconOS. In particular, the unique feature of dynamic partial reconfiguration supports self-expression through starting and terminating reconfigurable hardware cores. We detail a case study that runs two applications on an architecture with one CPU and 12 reconfigurable hardware cores and present self-expression strategies for adapting under performance, temperature and even conflicting constraints. The case study demonstrates that the reference architecture as a model for self-aware computing is highly useful as it allows us to structure and simplify the design process, which will be essential for designing complex future compute nodes. Furthermore, ReconOS is used as a base technology for flexible protocol stacks in Chapter 10, an approach for self-aware computing at the networking level.}}, author = {{Agne, Andreas and Happe, Markus and Lösch, Achim and Plessl, Christian and Platzner, Marco}}, booktitle = {{Self-aware Computing Systems}}, pages = {{145--165}}, publisher = {{Springer International Publishing}}, title = {{{Self-aware Compute Nodes}}}, doi = {{10.1007/978-3-319-39675-0_8}}, year = {{2016}}, } @inproceedings{25, author = {{Lass, Michael and Kühne, Thomas and Plessl, Christian}}, booktitle = {{Workshop on Approximate Computing (AC)}}, title = {{{Using Approximate Computing in Scientific Codes}}}, year = {{2016}}, } @inproceedings{31, author = {{Riebler, Heinrich and Vaz, Gavin Francis and Plessl, Christian and Trainiti, Ettore M. G. and Durelli, Gianluca C. and Bolchini, Cristiana}}, booktitle = {{Proc. HiPEAC Workshop on Reonfigurable Computing (WRC)}}, title = {{{Using Just-in-Time Code Generation for Transparent Resource Management in Heterogeneous Systems}}}, year = {{2016}}, } @inproceedings{138, abstract = {{Hardware accelerators are becoming popular in academia and industry. To move one step further from the state-of-the-art multicore plus accelerator approaches, we present in this paper our innovative SAVEHSA architecture. 
It comprises a heterogeneous hardware platform with three different high-end accelerators attached over PCIe (GPGPU, FPGA and Intel MIC). Such systems can process parallel workloads very efficiently whilst being more energy efficient than regular CPU systems. To leverage the heterogeneity, the workload has to be distributed among the computing units in a way that each unit is well-suited for the assigned task and executable code must be available. To tackle this problem we present two software components; the first can perform resource allocation at runtime while respecting system and application goals (in terms of throughput, energy, latency, etc.) and the second is able to analyze an application and generate executable code for an accelerator at runtime. We demonstrate the first proof-of-concept implementation of our framework on the heterogeneous platform, discuss different runtime policies and measure the introduced overheads.}}, author = {{Riebler, Heinrich and Vaz, Gavin Francis and Plessl, Christian and Trainiti, Ettore M. G. and Durelli, Gianluca C. and Del Sozzo, Emanuele and Santambrogio, Marco D. and Bolchini, Cristiana}}, booktitle = {{Proceedings of International Forum on Research and Technologies for Society and Industry (RTSI)}}, pages = {{1--5}}, publisher = {{IEEE}}, title = {{{Using Just-in-Time Code Generation for Transparent Resource Management in Heterogeneous Systems}}}, doi = {{10.1109/RTSI.2016.7740545}}, year = {{2016}}, } @article{1768, author = {{Plessl, Christian and Platzner, Marco and Schreier, Peter J.}}, journal = {{Informatik Spektrum}}, keywords = {{approximate computing, survey}}, number = {{5}}, pages = {{396--399}}, publisher = {{Springer}}, title = {{{Aktuelles Schlagwort: Approximate Computing}}}, doi = {{10.1007/s00287-015-0911-z}}, year = {{2015}}, } @inproceedings{303, abstract = {{This paper introduces Binary Acceleration At Runtime (BAAR), an easy-to-use on-the-fly binary acceleration mechanism which aims to tackle the problem of enabling existent software to automatically utilize accelerators at runtime. BAAR is based on the LLVM Compiler Infrastructure and has a client-server architecture. The client runs the program to be accelerated in an environment which allows program analysis and profiling. Program parts which are identified as suitable for the available accelerator are exported and sent to the server. The server optimizes these program parts for the accelerator and provides RPC execution for the client. The client transforms its program to utilize accelerated execution on the server for offloaded program parts. We evaluate our work with a proof-of-concept implementation of BAAR that uses an Intel Xeon Phi 5110P as the acceleration target and performs automatic offloading, parallelization and vectorization of suitable program parts. The practicality of BAAR for real-world examples is shown based on a study of stencil codes. Our results show a speedup of up to 4 without any developer-provided hints and 5.77 with hints over the same code compiled with the Intel Compiler at optimization level O2 and running on an Intel Xeon E5-2670 machine. 
Based on our insights gained during implementation and evaluation we outline future directions of research, e.g., offloading more fine-granular program parts than functions, a more sophisticated communication mechanism or introducing on-stack replacement.}}, author = {{Damschen, Marvin and Plessl, Christian}}, booktitle = {{Proceedings of the 5th International Workshop on Adaptive Self-tuning Computing Systems (ADAPT)}}, title = {{{Easy-to-Use On-The-Fly Binary Program Acceleration on Many-Cores}}}, year = {{2015}}, } @article{296, abstract = {{FPGAs are known to permit huge gains in performance and efficiency for suitable applications but still require reduced design efforts and shorter development cycles for wider adoption. In this work, we compare the resulting performance of two design concepts that in different ways promise such increased productivity. As common starting point, we employ a kernel-centric design approach, where computational hotspots in an application are identified and individually accelerated on FPGA. By means of a complex stereo matching application, we evaluate two fundamentally different design philosophies and approaches for implementing the required kernels on FPGAs. In the first implementation approach, we designed individually specialized data flow kernels in a spatial programming language for a Maxeler FPGA platform; in the alternative design approach, we target a vector coprocessor with large vector lengths, which is implemented as a form of programmable overlay on the application FPGAs of a Convey HC-1. We assess both approaches in terms of overall system performance, raw kernel performance, and performance relative to invested resources. After compensating for the effects of the underlying hardware platforms, the specialized dataflow kernels on the Maxeler platform are around 3x faster than kernels executing on the Convey vector coprocessor. In our concrete scenario, due to trade-offs between reconfiguration overheads and exposed parallelism, the advantage of specialized dataflow kernels is reduced to around 2.5x.}}, author = {{Kenter, Tobias and Schmitz, Henning and Plessl, Christian}}, journal = {{International Journal of Reconfigurable Computing (IJRC)}}, publisher = {{Hindawi}}, title = {{{Exploring Tradeoffs between Specialized Kernels and a Reusable Overlay in a Stereo-Matching Case Study}}}, doi = {{10.1155/2015/859425}}, volume = {{2015}}, year = {{2015}}, } @article{1775, abstract = {{The ATLAS experiment at CERN is planning full deployment of a new unified optical link technology for connecting detector front end electronics on the timescale of the LHC Run 4 (2025). It is estimated that roughly 8000 GBT (GigaBit Transceiver) links, with transfer rates up to 10.24 Gbps, will replace existing links used for readout, detector control and distribution of timing and trigger information. A new class of devices will be needed to interface many GBT links to the rest of the trigger, data-acquisition and detector control systems. In this paper FELIX (Front End LInk eXchange) is presented, a PC-based device to route data from and to multiple GBT links via a high-performance general purpose network capable of a total throughput up to O(20 Tbps). FELIX implies architectural changes to the ATLAS data acquisition system, such as the use of industry standard COTS components early in the DAQ chain. 
Additionally the design and implementation of a FELIX demonstration platform is presented and hardware and software aspects will be discussed.}}, author = {{Anderson, J and Borga, A and Boterenbrood, H and Chen, H and Chen, K and Drake, G and Francis, D and Gorini, B and Lanni, F and Lehmann Miotto, G and Levinson, L and Narevicius, J and Plessl, Christian and Roich, A and Ryu, S and Schreuder, F and Schumacher, Jörn and Vandelli, Wainer and Vermeulen, J and Zhang, J}}, journal = {{Journal of Physics: Conference Series}}, publisher = {{IOP Publishing}}, title = {{{FELIX: a High-Throughput Network Approach for Interfacing to Front End Electronics for ATLAS Upgrades}}}, doi = {{10.1088/1742-6596/664/8/082050}}, volume = {{664}}, year = {{2015}}, } @inproceedings{1773, author = {{Schumacher, Jörn and T. Anderson, J. and Borga, A. and Boterenbrood, H. and Chen, H. and Chen, K. and Drake, G. and Francis, D. and Gorini, B. and Lanni, F. and Lehmann-Miotto, Giovanna and Levinson, L. and Narevicius, J. and Plessl, Christian and Roich, A. and Ryu, S. and P. Schreuder, F. and Vandelli, Wainer and Vermeulen, J. and Zhang, J.}}, booktitle = {{Proc. Int. Conf. on Distributed Event-Based Systems (DEBS)}}, publisher = {{ACM}}, title = {{{Improving Packet Processing Performance in the ATLAS FELIX Project – Analysis and Optimization of a Memory-Bounded Algorithm}}}, doi = {{10.1145/2675743.2771824}}, year = {{2015}}, } @phdthesis{10624, abstract = {{The use of heterogeneous computing resources, such as graphics processing units or other specialized co-processors, has become widespread in recent years because of their performance and energy efficiency advantages. Operating system approaches that are limited to optimizing CPU usage are no longer sufficient for the efficient utilization of systems that comprise diverse resource types. Enabling task preemption on these architectures and migration of tasks between different resource types at run-time is not only key to improving the performance and energy consumption but also to enabling automatic scheduling methods for heterogeneous compute nodes. This thesis proposes novel techniques for run-time management of heterogeneous resources and enabling tasks to migrate between diverse hardware. It provides fundamental work towards future operating systems by discussing implications, limitations, and chances of the heterogeneity and introducing solutions for energy- and performance-efficient run-time systems. Scheduling methods to utilize heterogeneous systems by the use of a centralized scheduler are presented that show benefits over existing approaches in varying case studies.}}, author = {{Beisel, Tobias}}, isbn = {{978-3-8325-4155-2}}, pages = {{183}}, publisher = {{Logos Verlag Berlin GmbH}}, title = {{{Management and Scheduling of Accelerators for Heterogeneous High-Performance Computing}}}, year = {{2015}}, } @article{1774, abstract = {{In this article an efficient numerical method to solve multiobjective optimization problems for fluid flow governed by the Navier Stokes equations is presented. In order to decrease the computational effort, a reduced order model is introduced using Proper Orthogonal Decomposition and a corresponding Galerkin Projection. A global, derivative free multiobjective optimization algorithm is applied to compute the Pareto set (i.e. the set of optimal compromises) for the concurrent objectives minimization of flow field fluctuations and control cost. 
The method is illustrated for a 2D flow around a cylinder at Re = 100.}}, author = {{Peitz, Sebastian and Dellnitz, Michael}}, issn = {{1617-7061}}, journal = {{PAMM}}, number = {{1}}, pages = {{613--614}}, publisher = {{WILEY-VCH Verlag}}, title = {{{Multiobjective Optimization of the Flow Around a Cylinder Using Model Order Reduction}}}, doi = {{10.1002/pamm.201510296}}, volume = {{15}}, year = {{2015}}, } @article{1772, author = {{Torresen, Jim and Plessl, Christian and Yao, Xin}}, journal = {{IEEE Computer}}, keywords = {{self-awareness, self-expression}}, number = {{7}}, pages = {{18--20}}, publisher = {{IEEE Computer Society}}, title = {{{Self-Aware and Self-Expressive Systems – Guest Editor's Introduction}}}, doi = {{10.1109/MC.2015.205}}, volume = {{48}}, year = {{2015}}, } @article{1769, abstract = {{Große zylindrische Stahlprüflinge werden mittels der Methode der finiten Differenzen im Zeitbereich (engl. finite differences in time domain, FDTD) simulativ untersucht. Dabei werden Pitch-Catch-Messanordnungen verwendet. Es werden zwei Bildgebungsansätze vorgestellt: ersterer basiert auf dem Imaging Principle nach Claerbout, letzterer basiert auf gradientenbasierter Optimierung eines Zielfunktionals.}}, author = {{Hegler, Sebastian and Statz, Christoph and Mütze, Marco and Mooshofer, Hubert and Goldammer, Matthias and Fendt, Karl and Schwarzer, Stefan and Feldhoff, Kim and Flehmig, Martin and Markwardt, Ulf and E. Nagel, Wolfgang and Schütte, Maria and Walther, Andrea and Meinel, Michael and Basermann, Achim and Plettemeier, Dirk}}, journal = {{tm - Technisches Messen}}, number = {{9}}, pages = {{440--450}}, publisher = {{Walter de Gruyter}}, title = {{{Simulative Ultraschall-Untersuchung von Pitch-Catch-Messanordnungen für große zylindrische Stahl-Prüflinge und gradientenbasierte Bildgebung}}}, doi = {{doi:10.1515/teme-2015-0031}}, volume = {{82}}, year = {{2015}}, } @inproceedings{238, abstract = {{In this paper, we study how binary applications can be transparently accelerated with novel heterogeneous computing resources without requiring any manual porting or developer-provided hints. Our work is based on Binary Acceleration At Runtime (BAAR), our previously introduced binary acceleration mechanism that uses the LLVM Compiler Infrastructure. BAAR is designed as a client-server architecture. The client runs the program to be accelerated in an environment, which allows program analysis and profiling and identifies and extracts suitable program parts to be offloaded. The server compiles and optimizes these offloaded program parts for the accelerator and offers access to these functions to the client with a remote procedure call (RPC) interface. Our previous work proved the feasibility of our approach, but also showed that communication time and overheads limit the granularity of functions that can be meaningfully offloaded. In this work, we motivate the importance of a lightweight, high-performance communication between server and client and present a communication mechanism based on the Message Passing Interface (MPI). 
We evaluate our approach by using an Intel Xeon Phi 5110P as the acceleration target and show that the communication overhead can be reduced from 40% to 10%, thus enabling even small hotspots to benefit from offloading to an accelerator.}}, author = {{Damschen, Marvin and Riebler, Heinrich and Vaz, Gavin Francis and Plessl, Christian}}, booktitle = {{Proceedings of the 2015 Conference on Design, Automation and Test in Europe (DATE)}}, pages = {{1078--1083}}, publisher = {{EDA Consortium / IEEE}}, title = {{{Transparent offloading of computational hotspots from binary code to Xeon Phi}}}, doi = {{10.7873/DATE.2015.1124}}, year = {{2015}}, } @article{1779, author = {{Giefers, Heiner and Plessl, Christian and Förstner, Jens}}, issn = {{0163-5964}}, journal = {{ACM SIGARCH Computer Architecture News}}, keywords = {{funding-maxup, tet_topic_hpc}}, number = {{5}}, pages = {{65--70}}, publisher = {{ACM}}, title = {{{Accelerating Finite Difference Time Domain Simulations with Reconfigurable Dataflow Computers}}}, doi = {{10.1145/2641361.2641372}}, volume = {{41}}, year = {{2014}}, } @inproceedings{439, abstract = {{Reconfigurable architectures provide an opportunity to accelerate a wide range of applications, frequently by exploiting data-parallelism, where the same operations are homogeneously executed on a (large) set of data. However, when the sequential code is executed on a host CPU and only data-parallel loops are executed on an FPGA coprocessor, a sufficiently large number of loop iterations (trip counts) is required, such that the control- and data-transfer overheads to the coprocessor can be amortized. However, the trip count of large data-parallel loops is frequently not known at compile time, but only at runtime just before entering a loop. Therefore, we propose to generate code both for the CPU and the coprocessor, and to defer the decision where to execute the appropriate code to the runtime of the application when the trip count of the loop can be determined just at runtime. We demonstrate how an LLVM compiler based toolflow can automatically insert appropriate decision blocks into the application code. Analyzing popular benchmark suites, we show that this kind of runtime decisions is often applicable. The practical feasibility of our approach is demonstrated by a toolflow that automatically identifies loops suitable for vectorization and generates code for the FPGA coprocessor of a Convey HC-1. The toolflow adds decisions based on a comparison of the runtime-computed trip counts to thresholds for specific loops and also includes support to move just the required data to the coprocessor. We evaluate the integrated toolflow with characteristic loops executed on different input data sizes.}}, author = {{Vaz, Gavin Francis and Riebler, Heinrich and Kenter, Tobias and Plessl, Christian}}, booktitle = {{Proceedings of the International Conference on ReConFigurable Computing and FPGAs (ReConFig)}}, pages = {{1--8}}, publisher = {{IEEE}}, title = {{{Deferring Accelerator Offloading Decisions to Application Runtime}}}, doi = {{10.1109/ReConFig.2014.7032509}}, year = {{2014}}, } @inproceedings{406, abstract = {{Stereo-matching algorithms recently received a lot of attention from the FPGA acceleration community. Presented solutions range from simple, very resource efficient systems with modest matching quality for small embedded systems to sophisticated algorithms with several processing steps, implemented on big FPGAs. 
In order to achieve high throughput, most implementations strongly focus on pipelining and data reuse between different computation steps. This approach leads to high efficiency, but limits the supported computation patterns and due to the high integration of the implementation, adaptions to the algorithm are difficult. In this work, we present a stereo-matching implementation that starts by offloading individual kernels from the CPU to the FPGA. Between subsequent compute steps on the FPGA, data is stored off-chip in on-board memory of the FPGA accelerator card. This enables us to accelerate the AD-census algorithm with cross-based aggregation and scanline optimization for the first time without algorithmic changes and for up to full HD image dimensions. Analyzing throughput and bandwidth requirements, we outline some trade-offs that are involved with this approach, compared to tighter integration of more kernel loops into one design.}}, author = {{Kenter, Tobias and Schmitz, Henning and Plessl, Christian}}, booktitle = {{Proceedings of the International Conference on ReConFigurable Computing and FPGAs (ReConFig)}}, pages = {{1--8}}, publisher = {{IEEE}}, title = {{{Kernel-Centric Acceleration of High Accuracy Stereo-Matching}}}, doi = {{10.1109/ReConFig.2014.7032535}}, year = {{2014}}, } @inproceedings{1781, abstract = {{In light of an increasing awareness of environmental challenges, extensive research is underway to develop new light-weight materials. A problem arising with these materials is their increased response to vibration. This can be solved using a new composite material that contains embedded hollow spheres that are partially filled with particles. Progress on the adaptation of molecular dynamics towards a particle-based numerical simulation of this material is reported. This includes the treatment of specific boundary conditions and the adaption of the force computation. First results are presented that showcase the damping properties of such particle-filled spheres in a bouncing experiment.}}, author = {{Steinle, Tobias and Vrabec, Jadran and Walther, Andrea}}, booktitle = {{Proc. Modeling, Simulation and Optimization of Complex Processes (HPSC)}}, editor = {{Bock, Hans Georg and Hoang, Xuan Phu and Rannacher, Rolf and Schlöder, Johannes P.}}, isbn = {{978-3-319-09063-4}}, pages = {{233--243}}, publisher = {{Springer International Publishing}}, title = {{{Numerical Simulation of the Damping Behavior of Particle-Filled Hollow Spheres}}}, doi = {{10.1007/978-3-319-09063-4_19}}, year = {{2014}}, } @inproceedings{1782, author = {{Graf, Tobias and Schaefers, Lars and Platzner, Marco}}, booktitle = {{Proc. Conf. on Computers and Games (CG)}}, number = {{8427}}, pages = {{14--25}}, publisher = {{Springer}}, title = {{{On Semeai Detection in Monte-Carlo Go}}}, doi = {{10.1007/978-3-319-09165-5_2}}, year = {{2014}}, } @inproceedings{388, abstract = {{In order to leverage the use of reconfigurable architectures in general-purpose computing, quick and automated methods to find suitable accelerator designs are required. We tackle this challenge in both regards. In order to avoid long synthesis times, we target a vector coprocessor, implemented on the FPGAs of a Convey HC-1. Previous studies showed that existing tools were not able to accelerate a real-world application with low effort. We present a toolflow to automatically identify suitable loops for vectorization, generate a corresponding hardware/software bipartition, and generate coprocessor code. 
Where applicable, we leverage outer-loop vectorization. We evaluate our tools with a set of characteristic loops, systematically analyzing different dependency and data layout properties.}}, author = {{Kenter, Tobias and Vaz, Gavin Francis and Plessl, Christian}}, booktitle = {{Proceedings of the International Symposium on Reconfigurable Computing: Architectures, Tools, and Applications (ARC)}}, pages = {{144--155}}, publisher = {{Springer International Publishing}}, title = {{{Partitioning and Vectorizing Binary Applications for a Reconfigurable Vector Computer}}}, doi = {{10.1007/978-3-319-05960-0_13}}, volume = {{8405}}, year = {{2014}}, } @article{328, abstract = {{The ReconOS operating system for reconfigurable computing offers a unified multi-threaded programming model and operating system services for threads executing in software and threads mapped to reconfigurable hardware. The operating system interface allows hardware threads to interact with software threads using well-known mechanisms such as semaphores, mutexes, condition variables, and message queues. By semantically integrating hardware accelerators into a standard operating system environment, ReconOS allows for rapid design space exploration, supports a structured application development process and improves the portability of applications}}, author = {{Agne, Andreas and Happe, Markus and Keller, Ariane and Lübbers, Enno and Plattner, Bernhard and Platzner, Marco and Plessl, Christian}}, journal = {{IEEE Micro}}, number = {{1}}, pages = {{60--71}}, publisher = {{IEEE}}, title = {{{ReconOS - An Operating System Approach for Reconfigurable Computing}}}, doi = {{10.1109/MM.2013.110}}, volume = {{34}}, year = {{2014}}, } @inproceedings{377, abstract = {{In this paper, we study how AES key schedules can be reconstructed from decayed memory. This operation is a crucial and time consuming operation when trying to break encryption systems with cold-boot attacks. In software, the reconstruction of the AES master key can be performed using a recursive, branch-and-bound tree-search algorithm that exploits redundancies in the key schedule for constraining the search space. In this work, we investigate how this branch-and-bound algorithm can be accelerated with FPGAs. We translated the recursive search procedure to a state machine with an explicit stack for each recursion level and create optimized datapaths to accelerate in particular the processing of the most frequently accessed tree levels. We support two different decay models, of which especially the more realistic non-idealized asymmetric decay model causes very high runtimes in software. Our implementation on a Maxeler dataflow computing system outperforms a software implementation for this model by up to 27x, which makes cold-boot attacks against AES practical even for high error rates.}}, author = {{Riebler, Heinrich and Kenter, Tobias and Plessl, Christian and Sorge, Christoph}}, booktitle = {{Proceedings of Field-Programmable Custom Computing Machines (FCCM)}}, keywords = {{coldboot}}, pages = {{222--229}}, publisher = {{IEEE}}, title = {{{Reconstructing AES Key Schedules from Decayed Memory with FPGAs}}}, doi = {{10.1109/FCCM.2014.67}}, year = {{2014}}, } @inproceedings{1778, author = {{C. Durelli, Gianluca and Pogliani, Marcello and Miele, Antonio and Plessl, Christian and Riebler, Heinrich and Vaz, Gavin Francis and D. Santambrogio, Marco and Bolchini, Cristiana}}, booktitle = {{Proc. Int. Symp. 
on Parallel and Distributed Processing with Applications (ISPA)}}, pages = {{142--149}}, publisher = {{IEEE}}, title = {{{Runtime Resource Management in Heterogeneous System Architectures: The SAVE Approach}}}, doi = {{10.1109/ISPA.2014.27}}, year = {{2014}}, } @inproceedings{1780, author = {{C. Durelli, Gianluca and Copolla, Marcello and Djafarian, Karim and Koranaros, George and Miele, Antonio and Paolino, Michele and Pell, Oliver and Plessl, Christian and D. Santambrogio, Marco and Bolchini, Cristiana}}, booktitle = {{Proc. Int. Conf. on Reconfigurable Computing: Architectures, Tools and Applications (ARC)}}, publisher = {{Springer}}, title = {{{SAVE: Towards efficient resource management in heterogeneous system architectures}}}, doi = {{10.1007/978-3-319-05960-0_38}}, year = {{2014}}, } @article{365, abstract = {{Self-aware computing is a paradigm for structuring and simplifying the design and operation of computing systems that face unprecedented levels of system dynamics and thus require novel forms of adaptivity. The generality of the paradigm makes it applicable to many types of computing systems and, previously, researchers started to introduce concepts of self-awareness to multicore architectures. In our work we build on a recent reference architectural framework as a model for self-aware computing and instantiate it for an FPGA-based heterogeneous multicore running the ReconOS reconfigurable architecture and operating system. After presenting the model for self-aware computing and ReconOS, we demonstrate with a case study how a multicore application built on the principle of self-awareness autonomously adapts to changes in the workload and system state. Our work shows that the reference architectural framework as a model for self-aware computing can be practically applied and allows us to structure and simplify the design process, which is essential for designing complex future computing systems.}}, author = {{Agne, Andreas and Happe, Markus and Lösch, Achim and Plessl, Christian and Platzner, Marco}}, journal = {{ACM Transactions on Reconfigurable Technology and Systems (TRETS)}}, number = {{2}}, publisher = {{ACM}}, title = {{{Self-awareness as a Model for Designing and Operating Heterogeneous Multicores}}}, doi = {{10.1145/2617596}}, volume = {{7}}, year = {{2014}}, } @article{363, abstract = {{Due to the continuously shrinking device structures and increasing densities of FPGAs, thermal aspects have become the new focus for many research projects over the last years. Most researchers rely on temperature simulations to evaluate their novel thermal management techniques. However, these temperature simulations require a high computational effort if a detailed thermal model is used and their accuracies are often unclear. In contrast to simulations, the use of synthetic heat sources allows for experimental evaluation of temperature management methods. In this paper we investigate the creation of significant rises in temperature on modern FPGAs to enable future evaluation of thermal management techniques based on experiments. To that end, we have developed seven different heat-generating cores that use different subsets of FPGA resources. Our experimental results show that, according to external temperature probes connected to the FPGA’s heat sink, we can increase the temperature by an average of 81 °C. 
This corresponds to an average increase of 156.3 °C as measured by the built-in thermal diodes of our Virtex-5 FPGAs in less than 30 min by only utilizing about 21 percent of the slices.}}, author = {{Agne, Andreas and Hangmann, Hendrik and Happe, Markus and Platzner, Marco and Plessl, Christian}}, journal = {{Microprocessors and Microsystems}}, number = {{8, Part B}}, pages = {{911--919}}, publisher = {{Elsevier}}, title = {{{Seven Recipes for Setting Your FPGA on Fire – A Cookbook on Heat Generators}}}, doi = {{10.1016/j.micpro.2013.12.001}}, volume = {{38}}, year = {{2014}}, } @inbook{335, abstract = {{Im Bereich der Computersysteme ist die Festlegung der Grenze zwischen Hardware und Software eine zentrale Problemstellung. Diese Grenze hat in den letzten Jahrzehnten nicht nur die Entwicklung von Computersystemen bestimmt, sondern auch die Strukturierung der Ausbildung in den Computerwissenschaften beeinflusst und sogar zur Entstehung von neuen Forschungsrichtungen gef{\"u}hrt. In diesem Beitrag besch{\"a}ftigen wir uns mit Verschiebungen an der Grenze zwischen Hardware und Software und diskutieren insgesamt drei qualitativ unterschiedliche Formen solcher Verschiebungen. Wir beginnen mit der Entwicklung von Computersystemen im letzten Jahrhundert und der Entstehung dieser Grenze, die Hardware und Software erst als eigenst{\"a}ndige Produkte differenziert. Dann widmen wir uns der Frage, welche Funktionen in einem Computersystem besser in Hardware und welche besser in Software realisiert werden sollten, eine Fragestellung die zu Beginn der 90er-Jahre zur Bildung einer eigenen Forschungsrichtung, dem sogenannten Hardware/Software Co-design, gef{\"u}hrt hat. Im Hardware/Software Co-design findet eine Verschiebung von Funktionen an der Grenze zwischen Hardware und Software w{\"a}hrend der Entwicklung eines Produktes statt, um Produkteigenschaften zu optimieren. Im fertig entwickelten und eingesetzten Produkt hingegen k{\"o}nnen wir dann eine feste Grenze zwischen Hardware und Software beobachten. Im dritten Teil dieses Beitrags stellen wir mit selbst-adaptiven Systemen eine hochaktuelle Forschungsrichtung vor. In unserem Kontext bedeutet Selbstadaption, dass ein System Verschiebungen von Funktionen an der Grenze zwischen Hardware und Software autonom w{\"a}hrend der Betriebszeit vornimmt. Solche Systeme beruhen auf rekonfigurierbarer Hardware, einer relativ neuen Technologie mit der die Hardware eines Computers w{\"a}hrend der Laufzeit ver{\"a}ndert werden kann. Diese Technologie f{\"u}hrt zu einer durchl{\"a}ssigen Grenze zwischen Hardware und Software bzw. 
l{\"o}st sie die herk{\"o}mmliche Vorstellung einer festen Hardware und einer flexiblen Software damit auf.}}, author = {{Platzner, Marco and Plessl, Christian}}, booktitle = {{Logiken strukturbildender Prozesse: Automatismen}}, editor = {{Künsemöller, Jörn and Eke, Norber Otto and Foit, Lioba and Kaerlein, Timo}}, isbn = {{978-3-7705-5730-1}}, pages = {{123--144}}, publisher = {{Wilhelm Fink}}, title = {{{Verschiebungen an der Grenze zwischen Hardware und Software}}}, year = {{2014}}, } @phdthesis{1791, author = {{Meister, Dirk}}, publisher = {{Johannes Gutenberg-Universität Mainz}}, title = {{{Advanced Data Deduplication Techniques and Their Application}}}, year = {{2013}}, } @book{1790, author = {{Niehörster, Oliver}}, isbn = {{978-3-8440-1735-9}}, publisher = {{Shaker}}, title = {{{Autonomous Resource Management in Dynamic Data Centers}}}, year = {{2013}}, } @inproceedings{1788, author = {{Berenbrink, Petra and Brinkmann, André and Friedetzky, Tom and Meister, Dirk and Nagel, Lars}}, booktitle = {{Proc. Int. Symp. on Parallel and Distributed Processing Workshops (IPDPSW)}}, publisher = {{IEEE}}, title = {{{Distributing Storage in Cloud Environments}}}, doi = {{10.1109/IPDPSW.2013.148}}, year = {{2013}}, } @inproceedings{1793, author = {{Meister, Dirk and Brinkmann, André and Süß, Tim}}, booktitle = {{Proc. USENIX Conference on File and Storage Technologies (FAST)}}, pages = {{175--182}}, publisher = {{USENIX Association}}, title = {{{File Recipe Compression in Data Deduplication Systems}}}, year = {{2013}}, } @inproceedings{1786, author = {{Kasap, Server and Redif, Soydan}}, booktitle = {{Proc. IEEE Signal Processing and Communications Conf. (SUI)}}, publisher = {{IEEE}}, title = {{{FPGA Implementation of a Second-Order Convolutive Blind Signal Separation Algorithm}}}, doi = {{10.1109/SIU.2013.6531530}}, year = {{2013}}, } @inproceedings{528, abstract = {{Cold-boot attacks exploit the fact that DRAM contents are not immediately lost when a PC is powered off. Instead the contents decay rather slowly, in particular if the DRAM chips are cooled to low temperatures. This effect opens an attack vector on cryptographic applications that keep decrypted keys in DRAM. An attacker with access to the target computer can reboot it or remove the RAM modules and quickly copy the RAM contents to non-volatile memory. By exploiting the known cryptographic structure of the cipher and layout of the key data in memory, in our application an AES key schedule with redundancy, the resulting memory image can be searched for sections that could correspond to decayed cryptographic keys; then, the attacker can attempt to reconstruct the original key. However, the runtime of these algorithms grows rapidly with increasing memory image size, error rate and complexity of the bit error model, which limits the practicability of the approach.In this work, we study how the algorithm for key search can be accelerated with custom computing machines. 
We present an FPGA-based architecture on a Maxeler dataflow computing system that outperforms a software implementation by up to 205x, which significantly improves the practicability of cold-boot attacks against AES.}}, author = {{Riebler, Heinrich and Kenter, Tobias and Sorge, Christoph and Plessl, Christian}}, booktitle = {{Proceedings of the International Conference on Field-Programmable Technology (FPT)}}, keywords = {{coldboot}}, pages = {{386--389}}, publisher = {{IEEE}}, title = {{{FPGA-accelerated Key Search for Cold-Boot Attacks against AES}}}, doi = {{10.1109/FPT.2013.6718394}}, year = {{2013}}, } @inproceedings{1784, author = {{Kaiser, Jürgen and Meister, Dirk and Gottfried, Viktor and Brinkmann, André}}, booktitle = {{Proc. IEEE Int. Conf. on Networking, Architecture and Storage (NAS)}}, pages = {{88--97}}, publisher = {{IEEE Computer Society}}, title = {{{MCD: Overcoming the Data Download Bottleneck in Data Centers}}}, doi = {{10.1109/NAS.2013.18}}, year = {{2013}}, } @article{1792, author = {{Kasap, Server and Redif, Soydan}}, journal = {{IEEE Trans. on Very Large Scale Integration (VLSI) Systems}}, number = {{3}}, pages = {{522--536}}, publisher = {{IEEE}}, title = {{{Novel Field-Programmable Gate Array Architecture for Computing the Eigenvalue Decomposition of Para-Hermitian Polynomial Matrices}}}, doi = {{10.1109/TVLSI.2013.2248069}}, volume = {{22}}, year = {{2013}}, } @inproceedings{505, abstract = {{In this paper we introduce “On-The-Fly Computing”, our vision of future IT services that will be provided by assembling modular software components available on world-wide markets. After suitable components have been found, they are automatically integrated, configured and brought to execution in an On-The-Fly Compute Center. We envision that these future compute centers will continue to leverage three current trends in large-scale computing, which are an increasing amount of parallel processing, a trend to use heterogeneous computing resources, and—in the light of rising energy costs—energy-efficiency as a primary goal in the design and operation of computing systems. In this paper, we point out three research challenges and our current work in these areas.}}, author = {{Happe, Markus and Kling, Peter and Plessl, Christian and Platzner, Marco and Meyer auf der Heide, Friedhelm}}, booktitle = {{Proceedings of the 9th IEEE Workshop on Software Technology for Future embedded and Ubiquitous Systems (SEUS)}}, publisher = {{IEEE}}, title = {{{On-The-Fly Computing: A Novel Paradigm for Individualized IT Services}}}, doi = {{10.1109/ISORC.2013.6913232}}, year = {{2013}}, } @inproceedings{1787, author = {{Suess, Tim and Schoenrock, Andrew and Meisner, Sebastian and Plessl, Christian}}, booktitle = {{Proc. Int. Symp. on Parallel and Distributed Processing Workshops (IPDPSW)}}, isbn = {{978-0-7695-4979-8}}, pages = {{64--73}}, publisher = {{IEEE Computer Society}}, title = {{{Parallel Macro Pipelining on the Intel SCC Many-Core Computer}}}, doi = {{10.1109/IPDPSW.2013.136}}, year = {{2013}}, } @inproceedings{2107, author = {{Grunzke, Richard and Birkenheuer, Georg and Blunk, Dirk and Breuers, Sebastian and Brinkmann, André and Gesing, Sandra and Herres-Pawlis, Sonja and Kohlbacher, Oliver and Krüger, Jens and Kruse, Martin and Müller-Pfefferkorn, Ralph and Schäfer, Patrick and Schuller, Bernd and Steinke, Thomas and Zink, Andreas}}, booktitle = {{Proc.
UNICORE Summit}}, title = {{{A Data Driven Science Gateway for Computational Workflows}}}, year = {{2012}}, } @inproceedings{2178, author = {{Gesing, Sandra and Herres-Pawlis, Sonja and Birkenheuer, Georg and Brinkmann, André and Grunzke, Richard and Kacsuk, Peter and Kohlbacher, Oliver and Kozlovszky, Miklos and Krüger, Jens and Müller-Pfefferkorn, Ralph and Schäfer, Patrick and Steinke, Thomas}}, booktitle = {{Proceedings of Science}}, title = {{{A Science Gateway Getting Ready for Serving the International Molecular Simulation Community}}}, volume = {{PoS(EGICF12-EMITC2)050}}, year = {{2012}}, } @article{2102, author = {{Gesing, Sandra and Grunzke, Richard and Krüger, Jens and Birkenheuer, Georg and Wewior, Martin and Schäfer, Patrick and Schuller, Bernd and Schuster, Johannes and Herres-Pawlis, Sonja and Breuers, Sebastian and Balaskó, Ákos and Kozlovszky, Miklos and Szikszay Fabri, Anna and Packschies, Lars and Kacsuk, Peter and Blunk, Dirk and Steinke, Thomas and Brinkmann, André and Fels, Gregor and Müller-Pfefferkorn, Ralph and Jäkel, René and Kohlbacher, Oliver}}, journal = {{Journal of Grid Computing}}, number = {{4}}, pages = {{769--790}}, publisher = {{Springer}}, title = {{{A Single Sign-On Infrastructure for Science Gateways on a Use Case for Structural Bioinformatics}}}, doi = {{10.1007/s10723-012-9247-y}}, volume = {{10}}, year = {{2012}}, } @inproceedings{2099, author = {{Meister, Dirk and Kaiser, Jürgen and Brinkmann, André and Kuhn, Michael and Kunkel, Julian and Cortes, Toni}}, booktitle = {{Proc. Int. Conf. on Supercomputing (SC)}}, pages = {{7:1--7:11}}, publisher = {{IEEE Computer Society}}, title = {{{A Study on Data Deduplication in HPC Storage Systems}}}, doi = {{10.1109/SC.2012.14}}, year = {{2012}}, } @inproceedings{2103, author = {{Wistuba, Martin and Schaefers, Lars and Platzner, Marco}}, booktitle = {{Proc. IEEE Conf. on Computational Intelligence and Games (CIG)}}, pages = {{91--99}}, publisher = {{IEEE}}, title = {{{Comparison of Bayesian Move Prediction Systems for Computer Go}}}, doi = {{10.1109/CIG.2012.6374143}}, year = {{2012}}, } @inproceedings{2106, abstract = {{Although the benefits of FPGAs for accelerating scientific codes are widely acknowledged, the use of FPGA accelerators in scientific computing is not widespread because reaping these benefits requires knowledge of hardware design methods and tools that is typically not available to domain scientists. A promising but hardly investigated approach is to develop tool flows that keep the common languages for scientific code (C, C++, and Fortran) and allow the developer to augment the source code with OpenMP-like directives for instructing the compiler which parts of the application shall be offloaded to the FPGA accelerator. In this work we study whether the promise of effective FPGA acceleration with an OpenMP-like programming effort can actually be held. Our target system is the Convey HC-1 reconfigurable computer for which an OpenMP-like programming environment exists. As a case study, we use an application from computational nanophotonics. Our results show that a developer without previous FPGA experience could create an FPGA-accelerated application that is competitive with an optimized OpenMP-parallelized CPU version running on a two-socket quad-core server. Finally, we discuss our experiences with this tool flow and the Convey HC-1 from a productivity and economic point of view.}}, author = {{Meyer, Björn and Schumacher, Jörn and Plessl, Christian and Förstner, Jens}}, booktitle = {{Proc. Int. Conf.
on Field Programmable Logic and Applications (FPL)}}, keywords = {{funding-upb-forschungspreis, funding-maxup, tet_topic_hpc}}, pages = {{189--196}}, publisher = {{IEEE}}, title = {{{Convey Vector Personalities – FPGA Acceleration with an OpenMP-like Effort?}}}, doi = {{10.1109/FPL.2012.6339370}}, year = {{2012}}, } @article{1965, abstract = {{Virtualization technology makes data centers more dynamic and easier to administrate. Today, cloud providers offer customers access to complex applications running on virtualized hardware. Nevertheless, large virtualized data centers become stochastic environments, and the simplification on the user side leads to many challenges for the provider, who has to find cost-efficient configurations and deal with dynamic environments to ensure service level objectives (SLOs). We introduce a software solution that reduces the degree of human intervention to manage clouds. It is designed as a multi-agent system (MAS) and placed on top of the Infrastructure as a Service (IaaS) layer. Worker agents allocate resources, configure applications, check the feasibility of requests, and generate cost estimates. They are equipped with application-specific knowledge allowing them to estimate the type and number of necessary resources. During runtime, a worker agent monitors the job and adapts its resources to ensure the specified quality of service—even in noisy clouds where the job instances are influenced by other jobs. They interact with a scheduler agent, which takes care of limited resources and performs cost-aware scheduling by assigning jobs to times with low costs. The whole architecture is self-optimizing and able to use public or private clouds. Building a private cloud requires solving the challenge of finding a mapping of virtual machines (VMs) to hosts. We present a rule-based mapping algorithm for VMs. It offers an interface where policies can be defined and combined in a generic way. The algorithm performs the initial mapping at request time as well as a remapping during runtime. It deals with policy and infrastructure changes. An energy-aware scheduler and the availability of cheap resources provided by a spot market are analyzed. We evaluated our approach by building up an SaaS stack, which assigns resources in consideration of an energy function and that ensures SLOs of two different applications, a brokerage system and high-performance computing software. Experiments were done on a real cloud system and by simulations.}}, author = {{Niehörster, Oliver and Simon, Jens and Brinkmann, André and Keller, Axel and Krüger, Jens}}, journal = {{Journal of Grid Computing}}, number = {{3}}, pages = {{553--577}}, title = {{{Cost-aware and SLO Fulfilling Software as a Service}}}, doi = {{10.1007/s10723-012-9230-7}}, volume = {{10}}, year = {{2012}}, } @inproceedings{1789, author = {{Kaiser, Jürgen and Meister, Dirk and Brinkmann, André and Effert, Sascha}}, booktitle = {{Proc. Symp. on Mass Storage Systems and Technologies (MSST)}}, pages = {{1--12}}, publisher = {{IEEE}}, title = {{{Design of an exact data deduplication cluster}}}, doi = {{10.1109/MSST.2012.6232380}}, year = {{2012}}, } @inproceedings{615, abstract = {{Due to the continuously shrinking device structures and increasing densities of FPGAs, thermal aspects have become the new focus for many research projects over the last years. Most researchers rely on temperature simulations to evaluate their novel thermal management techniques.
However, the accuracy of the simulations is to some extent questionable, and they require a high computational effort if a detailed thermal model is used. For the experimental evaluation of real-world temperature management methods, synthetic heat sources are often employed. In this paper we therefore investigate whether we can create significant rises in temperature on modern FPGAs to enable future evaluation of thermal management techniques based on experiments rather than simulations. To this end, we have developed eight different heat-generating cores that use different subsets of the FPGA resources. Our experimental results show that, according to the built-in thermal diode of our Xilinx Virtex-5 FPGA, we can increase the chip temperature by 134 °C in less than 12 minutes by only utilizing about 21% of the slices.}}, author = {{Happe, Markus and Hangmann, Hendrik and Agne, Andreas and Plessl, Christian}}, booktitle = {{Proceedings of the International Conference on Reconfigurable Computing and FPGAs (ReConFig)}}, pages = {{1--8}}, publisher = {{IEEE}}, title = {{{Eight Ways to put your FPGA on Fire – A Systematic Study of Heat Generators}}}, doi = {{10.1109/ReConFig.2012.6416745}}, year = {{2012}}, } @inproceedings{2098, author = {{Kaiser, Jürgen and Meister, Dirk and Hartung, Tim and Brinkmann, André}}, booktitle = {{Proc. IEEE Int. Conf. on Parallel and Distributed Systems (ICPADS)}}, pages = {{181--188}}, publisher = {{IEEE}}, title = {{{ESB: Ext2 Split Block Device}}}, doi = {{10.1109/ICPADS.2012.34}}, year = {{2012}}, } @inproceedings{612, abstract = {{While numerous publications have presented ring oscillator designs for temperature measurements, a detailed study of the ring oscillator's design space is still missing. In this work, we introduce metrics for comparing the performance and area efficiency of ring oscillators and a methodology for determining these metrics. As a result, we present a systematic study of the design space for ring oscillators for a Xilinx Virtex-5 platform FPGA.}}, author = {{Rüthing, Christoph and Happe, Markus and Agne, Andreas and Plessl, Christian}}, booktitle = {{Proceedings of the International Conference on Field Programmable Logic and Applications (FPL)}}, pages = {{559--562}}, publisher = {{IEEE}}, title = {{{Exploration of Ring Oscillator Design Space for Temperature Measurements on FPGAs}}}, doi = {{10.1109/FPL.2012.6339370}}, year = {{2012}}, } @inproceedings{2100, author = {{Kasap, Server and Redif, Soydan}}, booktitle = {{Int. Architecture and Engineering Symp. (ARCHENG)}}, title = {{{FPGA implementation of a second-order convolutive blind signal separation algorithm}}}, year = {{2012}}, } @inproceedings{2097, author = {{Kasap, Server and Redif, Soydan}}, booktitle = {{Proc. Int. Conf. on Field Programmable Technology (ICFPT)}}, pages = {{135--140}}, publisher = {{IEEE Computer Society}}, title = {{{FPGA-based design and implementation of an approximate polynomial matrix EVD algorithm}}}, doi = {{10.1109/FPT.2012.6412125}}, year = {{2012}}, } @inproceedings{2104, author = {{Schlemmer, Tobias and Grunzke, Richard and Gesing, Sandra and Krüger, Jens and Birkenheuer, Georg and Müller-Pfefferkorn, Ralph and Kohlbacher, Oliver}}, booktitle = {{Proc.
EGI Technical Forum}}, title = {{{Generic User Management for Science Gateways via Virtual Organizations}}}, year = {{2012}}, } @inproceedings{609, abstract = {{Today's design and operation principles and methods do not scale well with future reconfigurable computing systems due to an increased complexity in system architectures and applications, run-time dynamics and corresponding requirements. Hence, novel design and operation principles and methods are needed that possibly break drastically with the static ones we have built into our systems and the fixed abstraction layers we have cherished over the last decades. Thus, we propose a HW/SW platform that collects and maintains information about its state and progress, which enables the system to reason about its behavior (self-awareness), and utilizes its knowledge to effectively and autonomously adapt its behavior to changing requirements (self-expression). To enable self-awareness, our compute nodes collect information using a variety of sensors, i.e., performance counters and thermal diodes, and use internal self-awareness models that process this information. For self-awareness, on-line learning is crucial such that the node learns and continuously updates its models at run-time to react to changing conditions. To enable self-expression, we break with the classic design-time abstraction layers of hardware, operating system and software. In contrast, our system is able to vertically migrate functionalities between the layers at run-time to exploit trade-offs between abstraction and optimization. This paper presents a heterogeneous multi-core architecture that enables self-awareness and self-expression, an operating system for our proposed hardware/software platform and a novel self-expression method.}}, author = {{Happe, Markus and Agne, Andreas and Plessl, Christian and Platzner, Marco}}, booktitle = {{Proceedings of the Workshop on Self-Awareness in Reconfigurable Computing Systems (SRCS)}}, pages = {{8--9}}, title = {{{Hardware/Software Platform for Self-aware Compute Nodes}}}, year = {{2012}}, } @article{2108, author = {{Schumacher, Tobias and Plessl, Christian and Platzner, Marco}}, issn = {{0141-9331}}, journal = {{Microprocessors and Microsystems}}, keywords = {{funding-altera}}, number = {{2}}, pages = {{110--126}}, title = {{{IMORC: An Infrastructure and Architecture Template for Implementing High-Performance Reconfigurable FPGA Accelerators}}}, doi = {{10.1016/j.micpro.2011.04.002}}, volume = {{36}}, year = {{2012}}, } @article{2177, author = {{Grad, Mariusz and Plessl, Christian}}, journal = {{Int. Journal of Reconfigurable Computing (IJRC)}}, publisher = {{Hindawi Publishing Corp.}}, title = {{{On the Feasibility and Limitations of Just-In-Time Instruction Set Extension for FPGA-based Reconfigurable Processors}}}, doi = {{10.1155/2012/418315}}, year = {{2012}}, } @inproceedings{2105, author = {{Congiu, Giuseppe and Grawinkel, Matthias and Narasimhamurthy, Sai and Brinkmann, André}}, booktitle = {{Proc. Workshop on Interfaces and Architectures for Scientific Data Storage (IASDS)}}, pages = {{16--24}}, publisher = {{IEEE}}, title = {{{One Phase Commit: A Low Overhead Atomic Commitment Protocol for Scalable Metadata Services}}}, doi = {{10.1109/ClusterW.2012.16}}, year = {{2012}}, } @article{2173, author = {{Redif, Soydan and Kasap, Server}}, journal = {{Int.
Journal of Electronics}}, number = {{12}}, pages = {{1646--1651}}, publisher = {{Taylor & Francis}}, title = {{{Parallel algorithm for computation of second-order sequential best rotations}}}, doi = {{10.1080/00207217.2012.751343}}, volume = {{100}}, year = {{2012}}, } @article{2174, author = {{Kasap, Server and Benkrid, Khaled}}, journal = {{Journal of Computers}}, number = {{6}}, pages = {{1312--1328}}, publisher = {{Academy Publishers}}, title = {{{Parallel Processor Design and Implementation for Molecular Dynamics Simulations on a FPGA Parallel Computer}}}, volume = {{7}}, year = {{2012}}, } @inproceedings{591, abstract = {{One major obstacle to widespread FPGA usage in general-purpose computing is the development tool flow, which requires much higher effort than pure software solutions. Convey Computer promises a solution to this problem for their HC-1 platform, where the FPGAs are configured to run as a vector processor and the software source code can be annotated with pragmas that guide an automated vectorization process. We investigate this approach for a stereo matching algorithm that has abundant parallelism and a number of different computational patterns. We note that for this case study the automated vectorization in its current state doesn’t hold its productivity promise. However, we also show that using the Vector Personality can yield significant speedups compared to CPU implementations in two of three investigated phases of the algorithm. Those speedups don’t match custom FPGA implementations, but can come with much reduced development effort.}}, author = {{Kenter, Tobias and Plessl, Christian and Schmitz, Henning}}, booktitle = {{Proceedings of the International Conference on ReConFigurable Computing and FPGAs (ReConFig)}}, pages = {{1--8}}, publisher = {{IEEE}}, title = {{{Pragma based parallelization - Trading hardware efficiency for ease of use?}}}, doi = {{10.1109/ReConFig.2012.6416773}}, year = {{2012}}, } @inproceedings{2180, author = {{Beisel, Tobias and Wiersema, Tobias and Plessl, Christian and Brinkmann, André}}, booktitle = {{Proc. Workshop on Computer Architecture and Operating System Co-design (CAOS)}}, keywords = {{funding-enhance}}, title = {{{Programming and Scheduling Model for Supporting Heterogeneous Accelerators in Linux}}}, year = {{2012}}, } @article{2172, author = {{Thielemans, Kris and Tsoumpas, Charalampos and Mustafovic, Sanida and Beisel, Tobias and Aguiar, Pablo and Dikaios, Nikolaos and W Jacobson, Matthew}}, journal = {{Physics in Medicine and Biology}}, number = {{4}}, pages = {{867--883}}, publisher = {{IOP Publishing}}, title = {{{STIR: Software for Tomographic Image Reconstruction Release 2}}}, doi = {{10.1088/0031-9155/57/4/867}}, volume = {{57}}, year = {{2012}}, } @inproceedings{2171, author = {{Gesing, Sandra and Herres-Pawlis, Sonja and Birkenheuer, Georg and Brinkmann, André and Grunzke, Richard and Kacsuk, Peter and Kohlbacher, Oliver and Kozlovszky, Miklos and Krüger, Jens and Müller-Pfefferkorn, Ralph and Schäfer, Patrick and Steinke, Thomas}}, booktitle = {{Proc. EGI Community Forum}}, title = {{{The MoSGrid Community From National to International Scale}}}, year = {{2012}}, } @inproceedings{2101, author = {{Grawinkel, Matthias and Süß, Tim and Best, Georg and Popov, Ivan and Brinkmann, André}}, booktitle = {{Proc.
Parallel Data Storage Workshop (PDSW)}}, pages = {{13--17}}, publisher = {{IEEE}}, title = {{{Towards Dynamic Scripted pNFS Layouts}}}, doi = {{10.1109/SC.Companion.2012.13}}, year = {{2012}}, } @inproceedings{567, abstract = {{Heterogeneous machines are gaining momentum in the High Performance Computing field due to their theoretical speedups and lower power consumption. In practice, while some applications meet the performance expectations, heterogeneous architectures still require a tremendous effort from the application developers. This work presents a code generation method to port codes to heterogeneous platforms, based on transformations of the control flow into function calls. The results show that the cost of the function-call mechanism is affordable for the tested HPC kernels. The complete toolchain, based on the LLVM compiler infrastructure, is fully automated once the sequential specification is provided.}}, author = {{Barrio, Pablo and Carreras, Carlos and Sierra, Roberto and Kenter, Tobias and Plessl, Christian}}, booktitle = {{Proceedings of the International Conference on High Performance Computing and Simulation (HPCS)}}, pages = {{559--565}}, publisher = {{IEEE}}, title = {{{Turning control flow graphs into function calls: Code generation for heterogeneous architectures}}}, doi = {{10.1109/HPCSim.2012.6266973}}, year = {{2012}}, } @article{2176, author = {{Herres-Pawlis, Sonja and Birkenheuer, Georg and Brinkmann, André and Gesing, Sandra and Grunzke, Richard and Jäkel, René and Kohlbacher, Oliver and Krüger, Jens and Dos Santos Vieira, Ines}}, journal = {{Studies in Health Technology and Informatics}}, pages = {{142--151}}, publisher = {{IOP Publishing}}, title = {{{Workflow-enhanced conformational analysis of guanidine zinc complexes via a science gateway}}}, doi = {{10.3233/978-1-61499-054-3-142}}, volume = {{175}}, year = {{2012}}, } @inproceedings{2199, author = {{Gesing, Sandra and Kacsuk, Peter and Kozlovszky, Miklos and Birkenheuer, Georg and Blunk, Dirk and Breuers, Sebastian and Brinkmann, André and Fels, Gregor and Grunzke, Richard and Herres-Pawlis, Sonja and Krüger, Jens and Packschies, Lars and Müller-Pfefferkorn, Ralph and Schäfer, Patrick and Steinke, Thomas and Szikszay Fabri, Anna and Warzecha, Klaus-Dieter and Wewior, Martin and Kohlbacher, Oliver}}, booktitle = {{Proc. EGI User Forum}}, pages = {{94--95}}, title = {{{A Science Gateway for Molecular Simulations}}}, year = {{2011}}, } @inproceedings{1972, abstract = {{We present a multi-agent system on top of the IaaS layer consisting of a scheduler agent and multiple worker agents. Each job is controlled by an autonomous worker agent, which is equipped with application-specific knowledge (e.g., performance functions) allowing it to estimate the type and number of necessary resources. During runtime, the worker agent monitors the job and adapts its resources to ensure the specified quality of service - even in noisy clouds where the job instances are influenced by other jobs. All worker agents interact with the scheduler agent, which takes care of limited resources and performs cost-aware scheduling by assigning jobs to times with low energy costs. The whole architecture is self-optimizing and able to use public or private clouds.}}, author = {{Niehörster, Oliver and Keller, Axel and Brinkmann, André}}, booktitle = {{Proc. Int. Meeting of the IEEE Int. Symp.
on Modeling, Analysis and Simulation of Computer and Telecommunication Systems (MASCOTS)}}, title = {{{An Energy-Aware SaaS Stack}}}, doi = {{10.1109/MASCOTS.2011.52}}, year = {{2011}}, } @inproceedings{2190, author = {{Niehörster, Oliver and Brinkmann, André}}, booktitle = {{Proc. IEEE Int. Conf. on Cloud Computing Technology and Science (CloudCom)}}, pages = {{138--145}}, publisher = {{IEEE Computer Society}}, title = {{{Autonomic Resource Management Handling Delayed Configuration Effects}}}, doi = {{10.1109/CloudCom.2011.28}}, year = {{2011}}, } @inproceedings{2203, author = {{Niehörster, Oliver and Simon, Jens and Brinkmann, André and Krieger, Alexander}}, booktitle = {{Proc. IEEE/ACM Int. Conf. on Grid Computing (GRID)}}, isbn = {{978-0-7695-4572-1}}, pages = {{157--164}}, publisher = {{IEEE Computer Society}}, title = {{{Autonomic Resource Management with Support Vector Machines}}}, doi = {{10.1109/Grid.2011.28}}, year = {{2011}}, } @inproceedings{2193, author = {{Beisel, Tobias and Wiersema, Tobias and Plessl, Christian and Brinkmann, André}}, booktitle = {{Proc. Int. Conf. on Application-Specific Systems, Architectures, and Processors (ASAP)}}, pages = {{223--226}}, publisher = {{IEEE Computer Society}}, title = {{{Cooperative multitasking for heterogeneous accelerators in the Linux Completely Fair Scheduler}}}, doi = {{10.1109/ASAP.2011.6043273}}, year = {{2011}}, } @inproceedings{2191, author = {{Kenter, Tobias and Plessl, Christian and Platzner, Marco and Kauschke, Michael}}, booktitle = {{Intel European Research and Innovation Conference}}, keywords = {{funding-intel}}, title = {{{Estimation and Partitioning for CPU-Accelerator Architectures}}}, year = {{2011}}, } @inproceedings{2195, author = {{Grawinkel, Matthias and Schäfer, Thorsten and Brinkmann, André and Hagemeyer, Jens and Porrmann, Mario}}, booktitle = {{Proc. Int. Symp. on Modeling, Analysis and Simulation of Computer and Telecommunication Systems (MASCOTS)}}, pages = {{297--306}}, publisher = {{IEEE Computer Society}}, title = {{{Evaluation of Applied Intra-Disk Redundancy Schemes to Improve Single Disk Reliability}}}, doi = {{10.1109/mascots.2011.13}}, year = {{2011}}, } @article{2201, author = {{Schumacher, Tobias and Süß, Tim and Plessl, Christian and Platzner, Marco}}, journal = {{Int. Journal of Reconfigurable Computing (IJRC)}}, keywords = {{funding-altera}}, publisher = {{Hindawi Publishing Corp.}}, title = {{{FPGA Acceleration of Communication-bound Streaming Applications: Architecture Modeling and a 3D Image Compositing Case Study}}}, doi = {{10.1155/2011/760954}}, year = {{2011}}, } @inproceedings{2197, author = {{Gesing, Sandra and Grunzke, Richard and Balaskó, Ákos and Birkenheuer, Georg and Blunk, Dirk and Breuers, Sebastian and Brinkmann, André and Fels, Gregor and Herres-Pawlis, Sonja and Kacsuk, Peter and Kozlovszky, Miklos and Krüger, Jens and Packschies, Lars and Schäfer, Patrick and Schuller, Bernd and Schuster, Johannes and Steinke, Thomas and Szikszay Fabri, Anna and Wewior, Martin and Müller-Pfefferkorn, Ralph and Kohlbacher, Oliver}}, booktitle = {{Proc. Int.
Workshop on Scientific Gateways (IWSG)}}, publisher = {{Consorzio COMETA}}, title = {{{Granular Security for a Science Gateway in Structural Bioinformatics}}}, year = {{2011}}, } @inbook{2202, author = {{Plessl, Christian and Platzner, Marco}}, booktitle = {{Reconfigurable Embedded Control Systems: Applications for Flexibility and Agility}}, editor = {{Khalgui, Mohamed and Hanisch, Hans-Michael}}, isbn = {{978-1-60960-086-0}}, publisher = {{IGI Global}}, title = {{{Hardware Virtualization on Dynamically Reconfigurable Embedded Processors}}}, doi = {{10.4018/978-1-60960-086-0}}, year = {{2011}}, } @article{2192, author = {{Birkenheuer, Georg and Brinkmann, André and Högqvist, Mikael and Papaspyrou, Alexander and Schott, Bernhard and Sommerfeld, Dietmar and Ziegler, Wolfgang}}, journal = {{Journal of Grid Computing}}, number = {{3}}, pages = {{355--377}}, publisher = {{Springer}}, title = {{{Infrastructure Federation Through Virtualized Delegation of Resources and Services}}}, doi = {{10.1007/s10723-011-9192-1}}, volume = {{9}}, year = {{2011}}, } @inproceedings{2198, author = {{Grad, Mariusz and Plessl, Christian}}, booktitle = {{Proc. Reconfigurable Architectures Workshop (RAW)}}, pages = {{278--285}}, publisher = {{IEEE Computer Society}}, title = {{{Just-in-time Instruction Set Extension – Feasibility and Limitations for an FPGA-based Reconfigurable ASIP Architecture}}}, doi = {{10.1109/IPDPS.2011.153}}, year = {{2011}}, } @inproceedings{2189, author = {{Grawinkel, Matthias and Pargmann, Markus and Dömer, Hubert and Brinkmann, André}}, booktitle = {{Proc. IEEE Int. Conf. on Parallel and Distributed Systems (ICPADS)}}, pages = {{380--387}}, publisher = {{IEEE}}, title = {{{Lonestar: An Energy-Aware Disk Based Long-Term Archival Storage System}}}, doi = {{10.1109/ICPADS.2011.77}}, year = {{2011}}, } @inproceedings{656, abstract = {{In the next decades, hybrid multi-cores will be the predominant architecture for reconfigurable FPGA-based systems. Temperature-aware thread mapping strategies are key for providing dependability in such systems. These strategies rely on measuring the temperature distribution and predicting the thermal behavior of the system when there are changes to the hardware and software running on the FPGA. While there are a number of tools that use thermal models to predict temperature distributions at design time, these tools lack the flexibility to autonomously adjust to changing FPGA configurations. To address this problem we propose a temperature-aware system that empowers FPGA-based reconfigurable multi-cores to autonomously predict the on-chip temperature distribution for pro-active thread remapping. Our system obtains temperature measurements through a self-calibrating grid of sensors and uses area-constrained heat-generating circuits in order to generate spatial and temporal temperature gradients. The generated temperature variations are then used to learn the free parameters of the system's thermal model. The system thus acquires an understanding of its own thermal characteristics. We implemented an FPGA system containing a net of 144 temperature sensors on a Xilinx Virtex-6 LX240T FPGA that is aware of its thermal model.
Finally, we show that the temperature predictions deviate on average by less than 0.72 °C from the measured temperature distributions at run-time.}}, author = {{Happe, Markus and Agne, Andreas and Plessl, Christian}}, booktitle = {{Proceedings of the 2011 International Conference on Reconfigurable Computing and FPGAs (ReConFig)}}, pages = {{55--60}}, publisher = {{IEEE}}, title = {{{Measuring and Predicting Temperature Distributions on FPGAs at Run-Time}}}, doi = {{10.1109/ReConFig.2011.59}}, year = {{2011}}, } @inproceedings{2205, author = {{Birkenheuer, Georg and Blunk, Dirk and Breuers, Sebastian and Brinkmann, André and Fels, Gregor and Gesing, Sandra and Grunzke, Richard and Herres-Pawlis, Sonja and Kohlbacher, Oliver and Krüger, Jens and Lang, Ulrich and Packschies, Lars and Müller-Pfefferkorn, Ralph and Schäfer, Patrick and Schuster, Johannes and Steinke, Thomas and Warzecha, Klaus-Dieter and Wewior, Martin}}, booktitle = {{Proc. of Grid Workflow Workshop (GWW)}}, title = {{{MoSGrid: Progress of Workflow driven Chemical Simulations}}}, volume = {{829}}, year = {{2011}}, } @inproceedings{2204, author = {{Graf, Tobias and Lorenz, Ulf and Platzner, Marco and Schaefers, Lars}}, booktitle = {{Proc. European Conf. on Parallel Processing (Euro-Par)}}, publisher = {{Springer}}, title = {{{Parallel Monte-Carlo Tree Search for HPC Systems}}}, doi = {{10.1007/978-3-642-23397-5_36}}, volume = {{6853}}, year = {{2011}}, } @inproceedings{2200, author = {{Kenter, Tobias and Platzner, Marco and Plessl, Christian and Kauschke, Michael}}, booktitle = {{Proc. Int. Symp. on Field-Programmable Gate Arrays (FPGA)}}, isbn = {{978-1-4503-0554-9}}, keywords = {{design space exploration, LLVM, partitioning, performance, estimation, funding-intel}}, pages = {{177--180}}, publisher = {{ACM}}, title = {{{Performance Estimation Framework for Automated Exploration of CPU-Accelerator Architectures}}}, doi = {{10.1145/1950413.1950448}}, year = {{2011}}, } @inproceedings{2188, author = {{Miranda, Alberto and Effert, Sascha and Kang, Yangwook and Miller, Ethan and Brinkmann, André and Cortes, Toni}}, booktitle = {{Proc. Int. Conf. on High Performance Computing (HIPC)}}, pages = {{1--10}}, publisher = {{IEEE Computer Society}}, title = {{{Reliable and Randomized Data Distribution Strategies for Large Scale Storage Systems}}}, doi = {{10.1109/HiPC.2011.6152745}}, year = {{2011}}, } @inproceedings{2196, author = {{Brinkmann, André and Gao, Yan and Korzeniowski, Miroslaw and Meister, Dirk}}, booktitle = {{Proc. IEEE Int. Conf. on Networking, Architecture and Storage (NAS)}}, pages = {{53--62}}, publisher = {{IEEE}}, title = {{{Request Load Balancing for Highly Skewed Traffic in P2P Networks}}}, doi = {{10.1109/NAS.2011.25}}, year = {{2011}}, } @inproceedings{1968, abstract = {{Infrastructure as a Service providers use virtualization to abstract their hardware and to create a dynamic data center. Virtualization enables the consolidation of virtual machines as well as their migration to other hosts during runtime. Each provider has its own strategy to efficiently operate a data center. We present a rule-based mapping algorithm for VMs, which is able to automatically adapt the mapping between VMs and physical hosts. It offers an interface where policies can be defined and combined in a generic way. The algorithm performs the initial mapping at request time as well as a remapping during runtime. It deals with policy and infrastructure changes.
We extended the open source IaaS solution Eucalyptus and evaluated it with typical policies: maximizing compute performance and VM locality to achieve high performance, and minimizing energy consumption. The evaluation was done on state-of-the-art servers in our own data center and by simulations using a workload of the Parallel Workload Archive. The results show that our algorithm performs well in dynamic data center environments.}}, author = {{Kleineweber, Christoph and Keller, Axel and Niehörster, Oliver and Brinkmann, André}}, booktitle = {{Proc. Int. Conf. on Parallel, Distributed and Network-Based Computing (PDP)}}, title = {{{Rule Based Mapping of Virtual Machines in Clouds}}}, doi = {{10.1109/PDP.2011.69}}, year = {{2011}}, } @inproceedings{2194, author = {{Meyer, Björn and Plessl, Christian and Förstner, Jens}}, booktitle = {{Symp. on Application Accelerators in High Performance Computing (SAAHPC)}}, keywords = {{tet_topic_hpc}}, pages = {{60--63}}, publisher = {{IEEE Computer Society}}, title = {{{Transformation of scientific algorithms to parallel computing code: subdomain support in a MPI-multi-GPU backend}}}, doi = {{10.1109/SAAHPC.2011.12}}, year = {{2011}}, } @article{1971, abstract = {{System virtualization has become the enabling technology to manage the increasing number of different applications inside data centers. The abstraction from the underlying hardware and the provision of multiple virtual machines (VM) on a single physical server have led to a consolidation and more efficient usage of physical servers. The abstraction from the hardware also eases the provision of applications on different data centers, as applied in several cloud computing environments. In this case, the application need not adapt to the environment of the cloud computing provider, but can travel around with its own VM image, including its own operating system and libraries. System virtualization and cloud computing could also be very attractive in the context of high-performance computing (HPC). Today, HPC centers have to cope with both the management of the infrastructure and the applications. Virtualization technology would enable these centers to focus on the infrastructure, while the users, collaborating inside their virtual organizations (VOs), would be able to provide the software. Nevertheless, there seems to be a contradiction between HPC and cloud computing, as there are very few successful approaches to virtualize HPC centers. This work discusses the underlying reasons, including management and performance aspects, and presents solutions to overcome the contradiction, including a set of new libraries. The viability of the presented approach is shown by evaluating a selected parallel scientific application in a virtualized HPC environment.}}, author = {{Birkenheuer, Georg and Brinkmann, André and Kaiser, Jürgen and Keller, Axel and Keller, Matthias and Kleineweber, Christoph and Konersmann, Christoph and Niehörster, Oliver and Schäfer, Thorsten and Simon, Jens and Wilhelm, Maximilian}}, journal = {{Software: Practice and Experience}}, publisher = {{John Wiley & Sons}}, title = {{{Virtualized HPC: a contradiction in terms?}}}, doi = {{10.1002/spe.1055}}, year = {{2011}}, } @inproceedings{2224, author = {{Grad, Mariusz and Plessl, Christian}}, booktitle = {{Proc. Int. Conf.
on Engineering of Reconfigurable Systems and Algorithms (ERSA)}}, isbn = {{1-60132-140-6}}, pages = {{144--150}}, publisher = {{CSREA Press}}, title = {{{An Open Source Circuit Library with Benchmarking Facilities}}}, year = {{2010}}, } @inproceedings{2229, author = {{Berenbrink, Petra and Brinkmann, André and Friedetzky, Tom and Nagel, Lars}}, booktitle = {{Proc. Int. Symp. on Parallelism in Algorithms and Architectures (SPAA)}}, pages = {{100--105}}, publisher = {{ACM}}, title = {{{Balls into Bins with Related Random Choices}}}, doi = {{10.1145/1810479.1810500}}, year = {{2010}}, } @inproceedings{2232, author = {{Berenbrink, Petra and Brinkmann, André and Friedetzky, Tom and Nagel, Lars}}, booktitle = {{Proc. Int. Symp. on Parallel and Distributed Processing (IPDPS)}}, pages = {{1--10}}, publisher = {{IEEE}}, title = {{{Balls into Non-uniform Bins}}}, doi = {{10.1109/IPDPS.2010.5470355}}, year = {{2010}}, } @inproceedings{2220, author = {{Andrews, David and Plessl, Christian}}, booktitle = {{Proc. Int. Conf. on Engineering of Reconfigurable Systems and Algorithms (ERSA)}}, isbn = {{1-60132-140-6}}, pages = {{165}}, publisher = {{CSREA Press}}, title = {{{Configurable Processor Architectures: History and Trends}}}, year = {{2010}}, } @inproceedings{2230, author = {{Meister, Dirk and Brinkmann, André}}, booktitle = {{Proc. Symp. on Mass Storage Systems and Technologies (MSST)}}, pages = {{1--6}}, publisher = {{IEEE Computer Society}}, title = {{{dedupv1: Improving Deduplication Throughput using Solid State Drives (SSD)}}}, doi = {{10.1109/MSST.2010.5496992}}, year = {{2010}}, } @inproceedings{2237, author = {{Niehörster, Oliver and Brinkmann, André and Fels, Gregor and Krüger, Jens and Simon, Jens}}, booktitle = {{Proc. Int. Conf. on Cluster Computing (CLUSTER)}}, issn = {{1552-5244}}, pages = {{178--187}}, publisher = {{IEEE}}, title = {{{Enforcing SLAs in Scientific Clouds}}}, doi = {{10.1109/CLUSTER.2010.42}}, year = {{2010}}, } @inproceedings{2236, author = {{Birkenheuer, Georg and Breuers, Sebastian and Brinkmann, André and Blunk, Dirk and Fels, Gregor and Gesing, Sandra and Herres-Pawlis, Sonja and Kohlbacher, Oliver and Krüger, Jens and Packschies, Lars}}, booktitle = {{Proc. of Grid Workflow Workshop (GWW)}}, pages = {{177--184}}, publisher = {{Gesellschaft für Informatik (GI)}}, title = {{{Grid-Workflows in Molecular Science}}}, year = {{2010}}, } @inproceedings{2231, author = {{Lensing, Paul Hermann and Meister, Dirk and Brinkmann, André}}, booktitle = {{Proc. Int. Worksh. on Storage Network Architecture and Parallel I/Os (SNAPI)}}, pages = {{33--42}}, publisher = {{IEEE}}, title = {{{hashFS: Applying Hashing to Optimized File Systems for Small File Reads}}}, doi = {{10.1109/SNAPI.2010.12}}, year = {{2010}}, } @inproceedings{2234, author = {{Bolte, Matthias and Sievers, Michael and Birkenheuer, Georg and Niehörster, Oliver and Brinkmann, André}}, booktitle = {{Proc. Design, Automation and Test in Europe Conf. (DATE)}}, publisher = {{EDA Consortium}}, title = {{{Non-intrusive Virtualization Management Using libvirt}}}, year = {{2010}}, } @inbook{2233, author = {{R. 
Prasad, Neeli and Eisenhauer, Markus and Ahlsén, Matts and Badii, Atta and Brinkmann, André and Marius Hansen, Klaus and Rosengren, Peter}}, booktitle = {{Vision and Challenges for Realising the Internet of Things}}, editor = {{Sundmaeker, Harald and Guillemin, Patrick and Friess, Peter and Woelfflé, Sylvie}}, isbn = {{978-92-79-15088-3}}, pages = {{153--163}}, publisher = {{European Commission}}, title = {{{Open Source Middleware for Networked Embedded Systems towards Future Internet of Things}}}, year = {{2010}}, } @inproceedings{2228, author = {{Kenter, Tobias and Platzner, Marco and Plessl, Christian and Kauschke, Michael}}, booktitle = {{Proc. Workshop on Architectural Research Prototyping (WARP), International Symposium on Computer Architecture (ISCA)}}, editor = {{Hammami, Omar and Larrabee, Sandra}}, title = {{{Performance Estimation for the Exploration of CPU-Accelerator Architectures}}}, year = {{2010}}, } @proceedings{2222, editor = {{Plaks, Toomas P. and Andrews, David and DeMara, Ronald and Lam, Herman and Lee, Jooheung and Plessl, Christian and Stitt, Greg}}, isbn = {{1-60132-140-6}}, publisher = {{CSREA Press}}, title = {{{Proc. Int. Conf. on Engineering of Reconfigurable Systems and Algorithms (ERSA)}}}, year = {{2010}}, } @inproceedings{2216, author = {{Grad, Mariusz and Plessl, Christian}}, booktitle = {{Proc. Int. Conf. on ReConFigurable Computing and FPGAs (ReConFig)}}, pages = {{67--72}}, publisher = {{IEEE Computer Society}}, title = {{{Pruning the Design Space for Just-In-Time Processor Customization}}}, doi = {{10.1109/ReConFig.2010.19}}, year = {{2010}}, } @inproceedings{2206, author = {{Keller, Ariane and Plattner, Bernhard and Lübbers, Enno and Platzner, Marco and Plessl, Christian}}, booktitle = {{Proc. IEEE Globecom Workshop on Network of the Future (FutureNet)}}, isbn = {{978-1-4244-8864-3}}, pages = {{372--376}}, publisher = {{IEEE}}, title = {{{Reconfigurable Nodes for Future Networks}}}, doi = {{10.1109/GLOCOMW.2010.5700341}}, year = {{2010}}, } @inproceedings{2225, author = {{Gao, Yan and Meister, Dirk and Brinkmann, André}}, booktitle = {{Proc. IEEE Int. Conf. on Networking, Architecture and Storage (NAS)}}, pages = {{126--134}}, publisher = {{IEEE}}, title = {{{Reliability Analysis of Declustered-Parity RAID 6 with Disk Scrubbing and Considering Irrecoverable Read Errors}}}, doi = {{10.1109/NAS.2010.11}}, year = {{2010}}, } @article{2235, author = {{Brinkmann, André and Battré, Dominic and Birkenheuer, Georg and Kao, Odej and Voß, Kerstin}}, journal = {{ForschungsForum Paderborn}}, number = {{13}}, publisher = {{Universität Paderborn}}, title = {{{Risikomanagement für verteilte Umgebungen}}}, volume = {{13}}, year = {{2010}}, } @inproceedings{809, author = {{Birkenheuer, Georg and Brinkmann, Andre and Karl, Holger}}, booktitle = {{Job Scheduling Strategies for Parallel Processing - 15th International Workshop, JSSPP 2010, Atlanta, GA, USA, April 23, 2010, Revised Selected Papers}}, pages = {{51--76}}, title = {{{Risk Aware Overbooking for Commercial Grids}}}, doi = {{10.1007/978-3-642-16505-4_4}}, year = {{2010}}, } @inproceedings{2227, author = {{Woehrle, Matthias and Plessl, Christian and Thiele, Lothar}}, booktitle = {{Proc. Int. Conf. 
Networked Sensing Systems (INSS)}}, isbn = {{978-1-4244-7911-5}}, pages = {{245--248}}, publisher = {{IEEE}}, title = {{{Rupeas: Ruby Powered Event Analysis DSL}}}, doi = {{10.1109/INSS.2010.5572211}}, year = {{2010}}, } @inproceedings{2217, author = {{Bienkowski, Marcin and Brinkmann, André and Klonowski, Marek and Korzeniowski, Miroslaw}}, booktitle = {{Proceedings of the 14th International Conference On Principles Of Distributed Systems (Opodis)}}, publisher = {{Springer}}, title = {{{SkewCCC+: A Heterogeneous Distributed Hash Table}}}, doi = {{10.1007/978-3-642-17653-1_18}}, volume = {{6490}}, year = {{2010}}, } @inproceedings{2218, author = {{Wewior, Martin and Packschies, Lars and Blunk, Dirk and Wickeroth, Daniel and Warzecha, Klaus-Dieter and Herres-Pawlis, Sonja and Gesing, Sandra and Breuers, Sebastian and Krüger, Jens and Birkenheuer, Georg and Lang, Ulrich}}, booktitle = {{Proc. Int. Workshop on Scientific Gateways (IWSG)}}, pages = {{39--43}}, publisher = {{Consorzio COMETA}}, title = {{{The MoSGrid Gaussian Portlet - Technologies for the Implementation of Portlets for Molecular Simulations}}}, year = {{2010}}, } @inproceedings{2223, author = {{Lübbers, Enno and Platzner, Marco and Plessl, Christian and Keller, Ariane and Plattner, Bernhard}}, booktitle = {{Proc. Int. Conf. on Engineering of Reconfigurable Systems and Algorithms (ERSA)}}, isbn = {{1-60132-140-6}}, pages = {{225--231}}, publisher = {{CSREA Press}}, title = {{{Towards Adaptive Networking for Embedded Devices based on Reconfigurable Hardware}}}, year = {{2010}}, } @inproceedings{2226, author = {{Beisel, Tobias and Niekamp, Manuel and Plessl, Christian}}, booktitle = {{Proc. Int. Conf. on Application-Specific Systems, Architectures, and Processors (ASAP)}}, isbn = {{978-1-4244-6965-9}}, pages = {{65--72}}, publisher = {{IEEE Computer Society}}, title = {{{Using Shared Library Interposing for Transparent Acceleration in Systems with Heterogeneous Hardware Accelerators}}}, doi = {{10.1109/ASAP.2010.5540798}}, year = {{2010}}, } @inproceedings{2219, author = {{Gesing, Sandra and Marton, Istvan and Birkenheuer, Georg and Schuller, Bernd and Grunzke, Richard and Krüger, Jens and Breuers, Sebastian and Blunk, Dirk and Fels, Gregor and Packschies, Lars and Brinkmann, André and Kohlbacher, Oliver and Kozlovszky, Miklos}}, booktitle = {{Proc. Int. Workshop on Scientific Gateways (IWSG)}}, pages = {{44--48}}, publisher = {{Consorzio COMETA}}, title = {{{Workflow Interoperability in a Grid Portal for Molecular Simulations}}}, year = {{2010}}, } @article{2354, author = {{Brinkmann, André and Eschweiler, Dominic}}, journal = {{Journal of Supercomputing}}, pages = {{35:1--35:10}}, publisher = {{ACM}}, title = {{{A Microdriver Architecture for Error Correcting Codes inside the Linux Kernel}}}, doi = {{10.1145/1654059.1654095}}, year = {{2009}}, } @inproceedings{2261, author = {{Schumacher, Tobias and Plessl, Christian and Platzner, Marco}}, booktitle = {{Proc. Int. Conf. on Field Programmable Logic and Applications (FPL)}}, isbn = {{978-1-4244-3892-1}}, issn = {{1946-1488}}, keywords = {{IMORC, NOC, KNN, accelerator}}, pages = {{338--344}}, publisher = {{IEEE}}, title = {{{An Accelerator for k-th Nearest Neighbor Thinning Based on the IMORC Infrastructure}}}, year = {{2009}}, } @inproceedings{2239, author = {{Höing, Andre and Scherp, Guido and Gudenkauf, Stefan and Meister, Dirk and Brinkmann, André}}, booktitle = {{Proc. Int. Conf. 
on Service Oriented Computing (ICSOC)}}, pages = {{301--315}}, publisher = {{Springer}}, title = {{{An Orchestration as a Service Infrastructure using Grid Technologies and WS-BPEL}}}, doi = {{10.1007/978-3-642-10383-4_20}}, volume = {{5900}}, year = {{2009}}, } @inproceedings{2238, author = {{Schumacher, Tobias and Süß, Tim and Plessl, Christian and Platzner, Marco}}, booktitle = {{Proc. Int. Conf. on ReConFigurable Computing and FPGAs (ReConFig)}}, isbn = {{978-0-7695-3917-1}}, keywords = {{IMORC, graphics}}, pages = {{119--124}}, publisher = {{IEEE Computer Society}}, title = {{{Communication Performance Characterization for Reconfigurable Accelerator Design on the XD1000}}}, doi = {{10.1109/ReConFig.2009.32}}, year = {{2009}}, } @inproceedings{2260, author = {{Birkenheuer, Georg and Carlson, Arthur and Fölling, Alexander and Högqvist, Mikael and Hoheisel, Andreas and Papaspyrou, Alexander and Rieger, Klaus and Schott, Bernhard and Ziegler, Wolfgang}}, booktitle = {{Proc. Cracow Grid Workshop (CGW)}}, isbn = {{978-83-61433-01-9}}, pages = {{96--103}}, title = {{{Connecting Communities on the Meta-Scheduling Level: The DGSI Approach!}}}, year = {{2009}}, } @inproceedings{2262, abstract = {{In this work we present EvoCache, a novel approach for implementing application-specific caches. The key innovation of EvoCache is to make the function that maps memory addresses from the CPU address space to cache indices programmable. We support arbitrary Boolean mapping functions that are implemented within a small reconfigurable logic fabric. For finding suitable cache mapping functions we rely on techniques from the evolvable hardware domain and utilize an evolutionary optimization procedure. We evaluate the use of EvoCache in an embedded processor for two specific applications (JPEG and BZIP2 compression) with respect to execution time, cache miss rate and energy consumption. We show that the evolvable hardware approach for optimizing the cache functions not only significantly improves the cache performance for the training data used during optimization, but that the evolved mapping functions generalize very well. Compared to a conventional cache architecture, EvoCache applied to test data achieves a reduction in execution time of up to 14.31% for JPEG (10.98% for BZIP2), and in energy consumption by 16.43% for JPEG (10.70% for BZIP2). We also discuss the integration of EvoCache into the operating system and show that the area and delay overheads introduced by EvoCache are acceptable. }}, author = {{Kaufmann, Paul and Plessl, Christian and Platzner, Marco}}, booktitle = {{Proc. NASA/ESA Conference on Adaptive Hardware and Systems (AHS)}}, keywords = {{EvoCache, evolvable hardware, computer architecture}}, pages = {{11--18}}, publisher = {{IEEE Computer Society}}, title = {{{EvoCaches: Application-specific Adaptation of Cache Mapping}}}, year = {{2009}}, } @inproceedings{2350, abstract = {{Mapping applications that consist of a collection of cores to FPGA accelerators and optimizing their performance is a challenging task in high performance reconfigurable computing. We present IMORC, an architectural template and highly versatile on-chip interconnect. IMORC links provide asynchronous FIFOs and bitwidth conversion, which allows for flexibly composing accelerators from cores running at full speed within their own clock domains, thus facilitating the re-use of cores and portability. Further, IMORC inserts performance counters for monitoring runtime data.
In this paper, we first introduce the IMORC architectural template and the on-chip interconnect, and then demonstrate IMORC on the example of accelerating the k-th nearest neighbor thinning problem on an XD1000 reconfigurable computing system. Using IMORC's monitoring infrastructure, we gain insights into the data-dependent behavior of the application which, in turn, allow for optimizing the accelerator. }}, author = {{Schumacher, Tobias and Plessl, Christian and Platzner, Marco}}, booktitle = {{Proc. Int. Symp. on Field-Programmable Custom Computing Machines (FCCM)}}, isbn = {{978-1-4244-4450-2}}, keywords = {{IMORC, interconnect, performance}}, pages = {{275--278}}, publisher = {{IEEE Computer Society}}, title = {{{IMORC: Application Mapping, Monitoring and Optimization for High-Performance Reconfigurable Computing}}}, doi = {{10.1109/FCCM.2009.25}}, year = {{2009}}, } @inproceedings{2264, author = {{Meister, Dirk and Brinkmann, André}}, booktitle = {{Proc. of the Israeli Experimental Systems Conference (SYSTOR)}}, pages = {{8:1--8:12}}, publisher = {{ACM}}, title = {{{Multi-Level Comparison of Data Deduplication in a Backup Scenario}}}, doi = {{10.1145/1534530.1534541}}, year = {{2009}}, } @inproceedings{2352, author = {{Beutel, Jan and Gruber, Stephan and Hasler, Andi and Lim, Roman and Meier, Andreas and Plessl, Christian and Talzi, Igor and Thiele, Lothar and Tschudin, Christian and Woehrle, Matthias and Yuecel, Mustafa}}, booktitle = {{Proc. Int. Conf. on Information Processing in Sensor Networks (IPSN)}}, isbn = {{978-1-4244-5108-1}}, keywords = {{WSN, PermaSense}}, pages = {{265--276}}, publisher = {{IEEE Computer Society}}, title = {{{PermaDAQ: A Scientific Instrument for Precision Sensing and Data Recovery in Environmental Extremes}}}, year = {{2009}}, } @inproceedings{2240, author = {{Niehörster, Oliver and Birkenheuer, Georg and Brinkmann, André and Blunk, Dirk and Elsässer, Brigitta and Herres-Pawlis, Sonja and Krüger, Jens and Niehörster, Julia and Packschies, Lars and Fels, Gregor}}, booktitle = {{Proc. Cracow Grid Workshop (CGW)}}, isbn = {{978-83-61433-01-9}}, pages = {{55--63}}, title = {{{Providing Scientific Software as a Service in Consideration of Service Level Agreements}}}, year = {{2009}}, } @techreport{2353, abstract = {{Wireless Sensor Networks (WSNs) are unique embedded computation systems for distributed sensing of a dispersed phenomenon. While a WSN is a strongly concurrent distributed system, its embedded aspects, severe resource limitations, and wireless communication require a fusion of technologies and methodologies from very different fields. As WSNs are deployed in remote locations for long-term unattended operation, assurance of correct functioning of the system is of prime concern. Thus, the design and development of WSNs requires specialized tools to allow for testing and debugging the system. To this end, we present a framework for analyzing and checking WSNs based on collected events during system operation. It allows for abstracting from the event trace by means of behavioral queries and uses assertions for checking the accordance of an execution to its specification. The framework is independent of WSN test platforms, applications and logging semantics and thus generally applicable for analyzing event logs of WSN test executions.
}}, author = {{Woehrle, Matthias and Plessl, Christian and Thiele, Lothar}}, keywords = {{Rupeas, DSL, WSN, testing}}, title = {{{Rupeas: Ruby Powered Event Analysis DSL}}}, year = {{2009}}, } @inproceedings{818, author = {{Birkenheuer, Georg and Brinkmann, Andre and Karl, Holger}}, booktitle = {{Job Scheduling Strategies for Parallel Processing, 14th International Workshop, JSSPP 2009, Rome, Italy, May 29, 2009. Revised Papers}}, pages = {{80--100}}, title = {{{The Gain of Overbooking}}}, doi = {{10.1007/978-3-642-04633-9_5}}, year = {{2009}}, }