@unpublished{32244, abstract = {{We push the boundaries of electronic structure-based \textit{ab-initio} molecular dynamics (AIMD) beyond 100 million atoms. This scale is otherwise barely reachable with classical force-field methods or novel neural network and machine learning potentials. We achieve this breakthrough by combining innovations in linear-scaling AIMD, efficient and approximate sparse linear algebra, low and mixed-precision floating-point computation on GPUs, and a compensation scheme for the errors introduced by numerical approximations. The core of our work is the non-orthogonalized local submatrix method (NOLSM), which scales very favorably to massively parallel computing systems and translates large sparse matrix operations into highly parallel, dense matrix operations that are ideally suited to hardware accelerators. We demonstrate that the NOLSM method, which is at the center point of each AIMD step, is able to achieve a sustained performance of 324 PFLOP/s in mixed FP16/FP32 precision corresponding to an efficiency of 67.7% when running on 1536 NVIDIA A100 GPUs.}}, author = {{Schade, Robert and Kenter, Tobias and Elgabarty, Hossam and Lass, Michael and Schütt, Ole and Lazzaro, Alfio and Pabst, Hans and Mohr, Stephan and Hutter, Jürg and Kühne, Thomas D. and Plessl, Christian}}, booktitle = {{arXiv:2104.08245}}, title = {{{Towards Electronic Structure-Based Ab-Initio Molecular Dynamics Simulations with Hundreds of Millions of Atoms}}}, year = {{2021}}, } @inproceedings{27365, author = {{Meyer, Marius}}, booktitle = {{Proceedings of the 11th International Symposium on Highly Efficient Accelerators and Reconfigurable Technologies}}, title = {{{Towards Performance Characterization of FPGAs in Context of HPC using OpenCL Benchmarks}}}, doi = {{10.1145/3468044.3468058}}, year = {{2021}}, } @inproceedings{16898, abstract = {{Electronic structure calculations based on density-functional theory (DFT) represent a significant part of today's HPC workloads and pose high demands on high-performance computing resources. To perform these quantum-mechanical DFT calculations on complex large-scale systems, so-called linear scaling methods instead of conventional cubic scaling methods are required. In this work, we take up the idea of the submatrix method and apply it to the DFT computations in the software package CP2K. For that purpose, we transform the underlying numeric operations on distributed, large, sparse matrices into computations on local, much smaller and nearly dense matrices. This allows us to exploit the full floating-point performance of modern CPUs and to make use of dedicated accelerator hardware, where performance has been limited by memory bandwidth before. We demonstrate both functionality and performance of our implementation and show how it can be accelerated with GPUs and FPGAs.}}, author = {{Lass, Michael and Schade, Robert and Kühne, Thomas and Plessl, Christian}}, booktitle = {{Proc. 
International Conference for High Performance Computing, Networking, Storage and Analysis (SC)}}, location = {{Atlanta, GA, US}}, pages = {{1127--1140}}, publisher = {{IEEE Computer Society}}, title = {{{A Submatrix-Based Method for Approximate Matrix Function Evaluation in the Quantum Chemistry Code CP2K}}}, doi = {{10.1109/SC41405.2020.00084}}, year = {{2020}}, } @inproceedings{21632, abstract = {{FPGAs have found increasing adoption in data center applications since a new generation of high-level tools have become available which noticeably reduce development time for FPGA accelerators and still provide high-quality results. There is, however, no high-level benchmark suite available, which specifically enables a comparison of FPGA architectures, programming tools, and libraries for HPC applications. To fill this gap, we have developed an OpenCL-based open-source implementation of the HPCC benchmark suite for Xilinx and Intel FPGAs. This benchmark can serve to analyze the current capabilities of FPGA devices, cards, and development tool flows, track progress over time, and point out specific difficulties for FPGA acceleration in the HPC domain. Additionally, the benchmark documents proven performance optimization patterns. We will continue optimizing and porting the benchmark for new generations of FPGAs and design tools and encourage active participation to create a valuable tool for the community.}}, author = {{Meyer, Marius and Kenter, Tobias and Plessl, Christian}}, booktitle = {{2020 IEEE/ACM International Workshop on Heterogeneous High-performance Reconfigurable Computing (H2RC)}}, isbn = {{9781665415927}}, keywords = {{FPGA, OpenCL, High Level Synthesis, HPC benchmarking}}, title = {{{Evaluating FPGA Accelerator Performance with a Parameterized OpenCL Adaptation of Selected Benchmarks of the HPCChallenge Benchmark Suite}}}, doi = {{10.1109/h2rc51942.2020.00007}}, year = {{2020}}, } @unpublished{32242, abstract = {{We consider a resource-aware variant of the classical multi-armed bandit problem: In each round, the learner selects an arm and determines a resource limit. It then observes a corresponding (random) reward, provided the (random) amount of consumed resources remains below the limit. Otherwise, the observation is censored, i.e., no reward is obtained. For this problem setting, we introduce a measure of regret, which incorporates the actual amount of allocated resources of each learning round as well as the optimality of realizable rewards. Thus, to minimize regret, the learner needs to set a resource limit and choose an arm in such a way that the chance to realize a high reward within the predefined resource limit is high, while the resource limit itself should be kept as low as possible. We derive the theoretical lower bound on the cumulative regret and propose a learning algorithm having a regret upper bound that matches the lower bound. 
In a simulation study, we show that our learning algorithm outperforms straightforward extensions of standard multi-armed bandit algorithms.}}, author = {{Bengs, Viktor and Hüllermeier, Eyke}}, booktitle = {{arXiv:2011.00813}}, title = {{{Multi-Armed Bandits with Censored Consumption of Resources}}}, year = {{2020}}, } @inproceedings{15478, abstract = {{Stratix 10 FPGA cards have good potential for the acceleration of HPC workloads since the Stratix 10 product line introduces devices with a large number of DSP and memory blocks. The high-level synthesis of OpenCL codes can play a fundamental role for FPGAs in HPC, because it allows different designs to be implemented with lower development effort compared to hand-optimized HDL. However, Stratix 10 cards are still hard to fully exploit using the Intel FPGA SDK for OpenCL. The implementation of designs with thousands of concurrent arithmetic operations often suffers from place-and-route problems that limit the maximum frequency or entirely prevent a successful synthesis. To overcome these issues for the implementation of matrix multiplication, we formulate Cannon's matrix multiplication algorithm with regard to its efficient synthesis within the FPGA logic. We obtain a two-level block algorithm, where the lower-level sub-matrices are multiplied using our Cannon's algorithm implementation. Following this design approach with multiple compute units, we achieve maximum frequencies close to and above 300 MHz with high utilization of DSP and memory blocks. This allows for performance results above 1 TeraFLOPS.}}, author = {{Gorlani, Paolo and Kenter, Tobias and Plessl, Christian}}, booktitle = {{Proceedings of the International Conference on Field-Programmable Technology (FPT)}}, publisher = {{IEEE}}, title = {{{OpenCL Implementation of Cannon's Matrix Multiplication Algorithm on Intel Stratix 10 FPGAs}}}, doi = {{10.1109/ICFPT47387.2019.00020}}, year = {{2019}}, } @inproceedings{22, abstract = {{This paper describes a data structure and a heuristic to plan and map arbitrary resources in complex combinations while applying time-dependent constraints. The approach is used in the planning-based workload manager OpenCCS at the Paderborn Center for Parallel Computing (PC\(^2\)) to operate heterogeneous clusters with up to 10000 cores. We also show performance results derived from four years of operation.}}, author = {{Keller, Axel}}, booktitle = {{Proc. Workshop on Job Scheduling Strategies for Parallel Processing (JSSPP)}}, editor = {{Klusáček, D. and Cirne, W. and Desai, N.}}, isbn = {{978-3-319-77398-8}}, keywords = {{Scheduling, Planning, Mapping, Workload management}}, location = {{Orlando, FL, USA}}, pages = {{132--151}}, publisher = {{Springer}}, title = {{{A Data Structure for Planning Based Workload Management of Heterogeneous HPC Systems}}}, doi = {{10.1007/978-3-319-77398-8_8}}, volume = {{10773}}, year = {{2018}}, } @inproceedings{1590, abstract = {{We present the submatrix method, a highly parallelizable method for the approximate calculation of inverse p-th roots of large sparse symmetric matrices, which are required in different scientific applications. Following the idea of Approximate Computing, we allow imprecision in the final result in order to utilize the sparsity of the input matrix and to allow massively parallel execution. For an n x n matrix, the proposed algorithm allows the calculations to be distributed over n nodes with little communication overhead. 
The result matrix exhibits the same sparsity pattern as the input matrix, allowing for efficient reuse of allocated data structures. We evaluate the algorithm with respect to the error that it introduces into calculated results, as well as its performance and scalability. We demonstrate that the error is relatively limited for well-conditioned matrices and that results are still valuable for error-resilient applications like preconditioning even for ill-conditioned matrices. We discuss the execution time and scaling of the algorithm on a theoretical level and present a distributed implementation of the algorithm using MPI and OpenMP. We demonstrate the scalability of this implementation by running it on a high-performance compute cluster comprised of 1024 CPU cores, showing a speedup of 665x compared to single-threaded execution.}}, author = {{Lass, Michael and Mohr, Stephan and Wiebeler, Hendrik and Kühne, Thomas and Plessl, Christian}}, booktitle = {{Proc. Platform for Advanced Scientific Computing (PASC) Conference}}, isbn = {{978-1-4503-5891-0/18/07}}, keywords = {{approximate computing, linear algebra, matrix inversion, matrix p-th roots, numeric algorithm, parallel computing}}, location = {{Basel, Switzerland}}, publisher = {{ACM}}, title = {{{A Massively Parallel Algorithm for the Approximate Calculation of Inverse p-th Roots of Large Sparse Matrices}}}, doi = {{10.1145/3218176.3218231}}, year = {{2018}}, } @inproceedings{1204, author = {{Riebler, Heinrich and Vaz, Gavin Francis and Kenter, Tobias and Plessl, Christian}}, booktitle = {{Proc. ACM SIGPLAN Symposium on Principles and Practice of Parallel Programming (PPoPP)}}, isbn = {{9781450349826}}, keywords = {{htrop}}, publisher = {{ACM}}, title = {{{Automated Code Acceleration Targeting Heterogeneous OpenCL Devices}}}, doi = {{10.1145/3178487.3178534}}, year = {{2018}}, } @inproceedings{1588, abstract = {{The exploration of FPGAs as accelerators for scientific simulations has so far mostly been focused on small kernels of methods working on regular data structures, for example in the form of stencil computations for finite difference methods. In computational sciences, often more advanced methods are employed that promise better stability, convergence, locality and scaling. Unstructured meshes are shown to be more effective and more accurate, compared to regular grids, in representing computation domains of various shapes. Using unstructured meshes, the discontinuous Galerkin method preserves the ability to perform explicit local update operations for simulations in the time domain. In this work, we investigate FPGAs as target platform for an implementation of the nodal discontinuous Galerkin method to find time-domain solutions of Maxwell's equations in an unstructured mesh. When maximizing data reuse and fitting constant coefficients into suitably partitioned on-chip memory, high computational intensity allows us to implement and feed wide data paths with hundreds of floating point operators. By decoupling off-chip memory accesses from the computations, high memory bandwidth can be sustained, even for the irregular access pattern required by parts of the application. Using the Intel/Altera OpenCL SDK for FPGAs, we present different implementation variants for different polynomial orders of the method. 
In different phases of the algorithm, the implementation comes close to either the computational or the bandwidth limits of the Arria 10 platform, thus outperforming a highly multithreaded CPU implementation by around 2x.}}, author = {{Kenter, Tobias and Mahale, Gopinath and Alhaddad, Samer and Grynko, Yevgen and Schmitt, Christian and Afzal, Ayesha and Hannig, Frank and Förstner, Jens and Plessl, Christian}}, booktitle = {{Proc. Int. Symp. on Field-Programmable Custom Computing Machines (FCCM)}}, keywords = {{tet_topic_hpc}}, publisher = {{IEEE}}, title = {{{OpenCL-based FPGA Design to Accelerate the Nodal Discontinuous Galerkin Method for Unstructured Meshes}}}, doi = {{10.1109/FCCM.2018.00037}}, year = {{2018}}, }