@unpublished{33493,
  abstract = {{Electronic structure calculations have been instrumental in providing many important insights into a range of physical and chemical properties of various molecular and solid-state systems. Their importance to various fields, including materials science, chemical sciences, computational chemistry, and device physics, is underscored by the large fraction of available public supercomputing resources devoted to these calculations. As we enter the exascale era, exciting new opportunities to increase simulation numbers, sizes, and accuracies present themselves. In order to realize these promises, however, the community of electronic structure software developers will first have to tackle a number of challenges pertaining to the efficient use of new architectures that rely heavily on massive parallelism and hardware accelerators. This roadmap provides a broad overview of the state of the art in electronic structure calculations and of the various new directions being pursued by the community. It covers 14 electronic structure codes, presenting their current status, their development priorities over the next five years, and their plans for tackling the challenges and leveraging the opportunities presented by the advent of exascale computing.}},
  author = {{Gavini, Vikram and Baroni, Stefano and Blum, Volker and Bowler, David R. and Buccheri, Alexander and Chelikowsky, James R. and Das, Sambit and Dawson, William and Delugas, Pietro and Dogan, Mehmet and Draxl, Claudia and Galli, Giulia and Genovese, Luigi and Giannozzi, Paolo and Giantomassi, Matteo and Gonze, Xavier and Govoni, Marco and Gulans, Andris and Gygi, François and Herbert, John M. and Kokott, Sebastian and Kühne, Thomas and Liou, Kai-Hsin and Miyazaki, Tsuyoshi and Motamarri, Phani and Nakata, Ayako and Pask, John E. and Plessl, Christian and Ratcliff, Laura E. and Richard, Ryan M. and Rossi, Mariana and Schade, Robert and Scheffler, Matthias and Schütt, Ole and Suryanarayana, Phanish and Torrent, Marc and Truflandier, Lionel and Windus, Theresa L. and Xu, Qimen and Yu, Victor W.-Z. and Perez, Danny}},
  booktitle = {{arXiv:2209.12747}},
  title = {{{Roadmap on Electronic Structure Codes in the Exascale Era}}},
  year = {{2022}},
}

@inproceedings{46193,
  author = {{Karp, Martin and Podobas, Artur and Kenter, Tobias and Jansson, Niclas and Plessl, Christian and Schlatter, Philipp and Markidis, Stefano}},
  booktitle = {{International Conference on High Performance Computing in Asia-Pacific Region}},
  publisher = {{ACM}},
  title = {{{A High-Fidelity Flow Solver for Unstructured Meshes on Field-Programmable Gate Arrays: Design, Evaluation, and Future Challenges}}},
  doi = {{10.1145/3492805.3492808}},
  year = {{2022}},
}

@unpublished{32404,
  abstract = {{The CP2K program package, which can be considered the Swiss army knife of atomistic simulations, is presented with a special emphasis on ab initio molecular dynamics using the second-generation Car-Parrinello method. After outlining current and near-term development efforts with regard to massively parallel low-scaling post-Hartree-Fock and eigenvalue solvers, novel approaches for how we plan to take full advantage of future low-precision hardware architectures are introduced.
Our focus here is on combining our submatrix method with the approximate computing paradigm to address the imminent exascale era.}},
  author = {{Kühne, Thomas and Plessl, Christian and Schade, Robert and Schütt, Ole}},
  booktitle = {{arXiv:2205.14741}},
  title = {{{CP2K on the road to exascale}}},
  year = {{2022}},
}

@article{33226,
  abstract = {{A parallel hybrid quantum-classical algorithm for the solution of the quantum-chemical ground-state energy problem on gate-based quantum computers is presented. This approach is based on the reduced density-matrix functional theory (RDMFT) formulation of the electronic structure problem. For that purpose, the density-matrix functional of the full system is decomposed into an indirectly coupled sum of density-matrix functionals for all its subsystems using the adaptive cluster approximation to RDMFT. The approximations involved in the decomposition and the adaptive cluster approximation itself can be systematically converged to the exact result. The solution of the density-matrix functionals of the effective subsystems involves a constrained minimization over many-particle states that are approximated by parametrized trial states on the quantum computer, similarly to the variational quantum eigensolver. The independence of the density-matrix functionals of the effective subsystems introduces a new level of parallelization and allows for the computational treatment of much larger molecules on a quantum computer with a given qubit count. In addition, techniques are presented for the proposed algorithm that reduce the qubit count, the number of quantum programs, and their depth. The evaluation of a density-matrix functional, as the essential part of our approach, is demonstrated for Hubbard-like systems on IBM quantum computers based on superconducting transmon qubits.}},
  author = {{Schade, Robert and Bauer, Carsten and Tamoev, Konstantin and Mazur, Lukas and Plessl, Christian and Kühne, Thomas}},
  journal = {{Physical Review Research}},
  pages = {{033160}},
  publisher = {{American Physical Society}},
  title = {{{Parallel quantum chemistry on noisy intermediate-scale quantum computers}}},
  doi = {{10.1103/PhysRevResearch.4.033160}},
  volume = {{4}},
  year = {{2022}},
}

@article{33684,
  author = {{Schade, Robert and Kenter, Tobias and Elgabarty, Hossam and Lass, Michael and Schütt, Ole and Lazzaro, Alfio and Pabst, Hans and Mohr, Stephan and Hutter, Jürg and Kühne, Thomas and Plessl, Christian}},
  issn = {{0167-8191}},
  journal = {{Parallel Computing}},
  keywords = {{Artificial Intelligence, Computer Graphics and Computer-Aided Design, Computer Networks and Communications, Hardware and Architecture, Theoretical Computer Science, Software}},
  publisher = {{Elsevier BV}},
  title = {{{Towards electronic structure-based ab-initio molecular dynamics simulations with hundreds of millions of atoms}}},
  doi = {{10.1016/j.parco.2022.102920}},
  volume = {{111}},
  year = {{2022}},
}

@article{27364,
  author = {{Meyer, Marius and Kenter, Tobias and Plessl, Christian}},
  issn = {{0743-7315}},
  journal = {{Journal of Parallel and Distributed Computing}},
  title = {{{In-depth FPGA Accelerator Performance Evaluation with Single Node Benchmarks from the HPC Challenge Benchmark Suite for Intel and Xilinx FPGAs using OpenCL}}},
  doi = {{10.1016/j.jpdc.2021.10.007}},
  year = {{2022}},
}

@article{28099,
  abstract = {{N-body methods are one of the essential algorithmic building blocks of high-performance and parallel computing.
Previous research has shown promising performance for implementing N-body simulations with pairwise force calculations on FPGAs. However, to avoid challenges with accumulation and memory access patterns, the presented designs calculate each pairwise force twice, once for the force sum of each of the two involved particles. Also, they require large problem instances with hundreds of thousands of particles to reach their respective peak performance, limiting their applicability in strong scaling scenarios. This work addresses both issues by presenting a novel FPGA design that uses each calculated force twice and overlaps data transfers and computations in a way that allows peak performance to be reached even for small problem instances, outperforming previous single-precision results even in double precision, and scaling linearly over multiple interconnected FPGAs. For a comparison across architectures, we provide an equally optimized CPU reference, which for large problems actually achieves higher peak performance per device. However, given the strong scaling advantages of the FPGA design, in parallel setups with a few thousand particles per device, the FPGA platform achieves the highest performance and power efficiency.}},
  author = {{Menzel, Johannes and Plessl, Christian and Kenter, Tobias}},
  issn = {{1936-7406}},
  journal = {{ACM Transactions on Reconfigurable Technology and Systems}},
  number = {{1}},
  pages = {{1--30}},
  title = {{{The Strong Scaling Advantage of FPGAs in HPC for N-body Simulations}}},
  doi = {{10.1145/3491235}},
  volume = {{15}},
  year = {{2021}},
}

@inproceedings{46194,
  author = {{Kenter, Tobias and Shambhu, Adesh and Faghih-Naini, Sara and Aizinger, Vadym}},
  booktitle = {{Proceedings of the Platform for Advanced Scientific Computing Conference}},
  publisher = {{ACM}},
  title = {{{Algorithm-hardware co-design of a discontinuous Galerkin shallow-water model for a dataflow architecture on FPGA}}},
  doi = {{10.1145/3468267.3470617}},
  year = {{2021}},
}

@inproceedings{46195,
  author = {{Karp, Martin and Podobas, Artur and Jansson, Niclas and Kenter, Tobias and Plessl, Christian and Schlatter, Philipp and Markidis, Stefano}},
  booktitle = {{2021 IEEE International Parallel and Distributed Processing Symposium (IPDPS)}},
  publisher = {{IEEE}},
  title = {{{High-Performance Spectral Element Methods on Field-Programmable Gate Arrays: Implementation, Evaluation, and Future Projection}}},
  doi = {{10.1109/ipdps49936.2021.00116}},
  year = {{2021}},
}

@inbook{21587,
  abstract = {{Solving partial differential equations on unstructured grids is a cornerstone of engineering and scientific computing. Nowadays, heterogeneous parallel platforms with CPUs, GPUs, and FPGAs enable energy-efficient and computationally demanding simulations. We developed the HighPerMeshes C++-embedded Domain-Specific Language (DSL) to bridge the abstraction gap between the mathematical and algorithmic formulation of mesh-based algorithms for PDE problems on the one hand and an increasing number of heterogeneous platforms with their different parallel programming and runtime models on the other hand. Thus, the HighPerMeshes DSL aims at higher productivity in the code development process for multiple target platforms. We introduce the concepts as well as the basic structure of the HighPerMeshes DSL and demonstrate its usage with three examples: a Poisson problem and a monodomain problem, both solved by the continuous finite element method, and Maxwell's equations, solved by the discontinuous Galerkin method.
The mapping of the abstract algorithmic description onto parallel hardware, including distributed memory compute clusters, is presented. Finally, the achievable performance and scalability are demonstrated for a typical example problem on a multi-core CPU cluster.}},
  author = {{Alhaddad, Samer and Förstner, Jens and Groth, Stefan and Grünewald, Daniel and Grynko, Yevgen and Hannig, Frank and Kenter, Tobias and Pfreundt, Franz-Josef and Plessl, Christian and Schotte, Merlind and Steinke, Thomas and Teich, Jürgen and Weiser, Martin and Wende, Florian}},
  booktitle = {{Euro-Par 2020: Parallel Processing Workshops}},
  isbn = {{9783030715922}},
  issn = {{0302-9743}},
  keywords = {{tet_topic_hpc}},
  title = {{{HighPerMeshes – A Domain-Specific Language for Numerical Algorithms on Unstructured Grids}}},
  doi = {{10.1007/978-3-030-71593-9_15}},
  year = {{2021}},
}