@article{62034,
  abstract     = {{Effective single-particle theories, such as Hartree–Fock, density functional theory, and tight-binding, are limited by the computational cost of the self-consistent field (SCF) procedure, which typically scales cubically with the system size. This makes large-scale applications impractical without specialized algorithms and hardware. Here, we present the submatrix and graphical processing unit (GPU)-accelerated software implementation of the PTB tight-binding potential, realized in the open-source ptb codebase [M. Mueller, A. Katbashev, and S. Ehlert (2025). “grimme-lab/ptb: v3.8.1,” Zenodo. https://zenodo.org/records/17015872]. We first benchmark a traditional diagonalization-based SCF solver against density-matrix-based purification approaches, systematically varying both system size and computer hardware. Our findings show that the usage of GPUs permits shifting the boundaries to much larger systems than previously thought feasible, achieving an overall 10–15-fold performance speedup. Second, we introduce the implementation of a decomposition-type submatrix method, specifically designed for efficient operation on mid- to large-sized systems, to address the computational overhead associated with full-system diagonalization. We demonstrate that, from a certain dimension (≈$10^4$ basis functions) on, our submatrix method reduces the overall computational cost while maintaining acceptable numerical accuracy. Our study demonstrates the significance of the interplay between modern hardware, algorithmic considerations, and novel tight-binding methods, paving the way for further development in this direction.}},
  author       = {{Katbashev, Abylay and Schade, Robert and Laß, Michael and Müller, Marcel and Grimme, Stefan and Hansen, Andreas and Kühne, Thomas}},
  issn         = {{0021-9606}},
  journal      = {{The Journal of Chemical Physics}},
  number       = {{13}},
  publisher    = {{AIP Publishing}},
  title        = {{{Submatrix and GPU-accelerated implementation of density matrix tight-binding}}},
  doi          = {{10.1063/5.0271379}},
  volume       = {{163}},
  year         = {{2025}},
}

@article{53474,
  abstract     = {{We present a novel approach to characterize and quantify microheterogeneity and microphase separation in computer simulations of complex liquid mixtures. Our post-processing method is based on local density fluctuations of the different constituents in sampling spheres of varying size. It can be easily applied to both molecular dynamics (MD) and Monte Carlo (MC) simulations, including periodic boundary conditions. Multidimensional correlation of the density distributions yields a clear picture of the domain formation due to the subtle balance of different interactions. We apply our approach to the example of force field molecular dynamics simulations of imidazolium-based ionic liquids with different side chain lengths at different temperatures, namely 1-ethyl-3-methylimidazolium chloride, 1-hexyl-3-methylimidazolium chloride, and 1-decyl-3-methylimidazolium chloride, which are known to form distinct liquid domains. We put the results into the context of existing microheterogeneity analyses and demonstrate the advantages and sensitivity of our novel method. Furthermore, we show how to estimate the configuration entropy from our analysis, and we investigate voids in the system. The analysis has been implemented into our program package TRAVIS and is thus available as free software.}},
  author       = {{Lass, Michael and Kenter, Tobias and Plessl, Christian and Brehm, Martin}},
  issn         = {{1099-4300}},
  journal      = {{Entropy}},
  number       = {{4}},
  publisher    = {{MDPI AG}},
  title        = {{{Characterizing Microheterogeneity in Liquid Mixtures via Local Density Fluctuations}}},
  doi          = {{10.3390/e26040322}},
  volume       = {{26}},
  year         = {{2024}},
}

@article{53663,
  abstract     = {{Noctua 2 is a supercomputer operated at the Paderborn Center for Parallel Computing (PC2) at Paderborn University in Germany. Noctua 2 was inaugurated in 2022 and is an Atos BullSequana XH2000 system. It consists mainly of three node types: 1) CPU Compute nodes with AMD EPYC processors in different main memory configurations, 2) GPU nodes with NVIDIA A100 GPUs, and 3) FPGA nodes with Xilinx Alveo U280 and Intel Stratix 10 FPGA cards. While CPUs and GPUs are known off-the-shelf components in HPC systems, the operation of a large number of FPGA cards from different vendors and a dedicated FPGA-to-FPGA network are unique characteristics of Noctua 2. This paper describes in detail the overall setup of Noctua 2 and gives insights into the operation of the cluster from a hardware, software and facility perspective.}},
  author       = {{Bauer, Carsten and Kenter, Tobias and Lass, Michael and Mazur, Lukas and Meyer, Marius and Nitsche, Holger and Riebler, Heinrich and Schade, Robert and Schwarz, Michael and Winnwa, Nils and Wiens, Alex and Wu, Xin and Plessl, Christian and Simon, Jens}},
  journal      = {{Journal of large-scale research facilities}},
  keywords     = {{Noctua 2, Supercomputer, FPGA, PC2, Paderborn Center for Parallel Computing}},
  title        = {{{Noctua 2 Supercomputer}}},
  doi          = {{10.17815/jlsrf-8-187}},
  volume       = {{9}},
  year         = {{2024}},
}

@article{56604,
  abstract     = {{This manuscript makes the claim of having computed the 9th Dedekind number, D(9). This was done by accelerating the core operation of the process with an efficient FPGA design that outperforms an optimized 64-core CPU reference by 95x. The FPGA execution was parallelized on the Noctua 2 supercomputer at Paderborn University. The resulting value for D(9) is 286386577668298411128469151667598498812366. This value can be verified in two steps. We have made the data file containing the 490 M results available, each of which can be verified separately on CPU, and the whole file sums to our proposed value. The paper explains the mathematical approach in the first part, before putting the focus on a deep dive into the FPGA accelerator implementation followed by a performance analysis. The FPGA implementation was done in Register-Transfer Level using a dual-clock architecture and shows how we achieved an impressive FMax of 450 MHz on the targeted Stratix 10 GX 2,800 FPGAs. The total compute time used was 47,000 FPGA hours.}},
  author       = {{Van Hirtum, Lennart and De Causmaecker, Patrick and Goemaere, Jens and Kenter, Tobias and Riebler, Heinrich and Lass, Michael and Plessl, Christian}},
  issn         = {{1936-7406}},
  journal      = {{ACM Transactions on Reconfigurable Technology and Systems}},
  number       = {{3}},
  pages        = {{1--28}},
  publisher    = {{Association for Computing Machinery (ACM)}},
  title        = {{{A Computation of the Ninth Dedekind Number Using FPGA Supercomputing}}},
  doi          = {{10.1145/3674147}},
  volume       = {{17}},
  year         = {{2024}},
}

@article{53202,
  abstract     = {{At large scales, quantum systems may become advantageous over their classical counterparts at performing certain tasks. Developing tools to analyze these systems at the relevant scales, in a manner consistent with quantum mechanics, is therefore critical to benchmarking performance and characterizing their operation. While classical computational approaches cannot perform like-for-like computations of quantum systems beyond a certain scale, classical high-performance computing (HPC) may nevertheless be useful for precisely these characterization and certification tasks. By developing open-source customized algorithms using high-performance computing, we perform quantum tomography on a megascale quantum photonic detector covering a Hilbert space of $10^6$. This requires finding $10^8$ elements of the matrix corresponding to the positive operator valued measure (POVM), the quantum description of the detector, and is achieved in minutes of computation time. Moreover, by exploiting the structure of the problem, we achieve highly efficient parallel scaling, paving the way for quantum objects up to a system size of $10^{12}$ elements to be reconstructed using this method. In general, this shows that a consistent quantum mechanical description of quantum phenomena is applicable at everyday scales. More concretely, this enables the reconstruction of large-scale quantum sources, processes and detectors used in computation and sampling tasks, which may be necessary to prove their nonclassical character or quantum computational advantage.}},
  author       = {{Schapeler, Timon and Schade, Robert and Lass, Michael and Plessl, Christian and Bartley, Tim}},
  journal      = {{Quantum Science and Technology}},
  number       = {{1}},
  publisher    = {{IOP Publishing}},
  title        = {{{Scalable quantum detector tomography by high-performance computing}}},
  doi          = {{10.1088/2058-9565/ad8511}},
  volume       = {{10}},
  year         = {{2024}},
}

@unpublished{43439,
  abstract     = {{This preprint makes the claim of having computed the $9^{th}$ Dedekind
Number. This was done by building an efficient FPGA Accelerator for the core
operation of the process, and parallelizing it on the Noctua 2 Supercluster at
Paderborn University. The resulting value is
286386577668298411128469151667598498812366. This value can be verified in two
steps. We have made the data file containing the 490M results available, each
of which can be verified separately on CPU, and the whole file sums to our
proposed value.}},
  author       = {{Van Hirtum, Lennart and De Causmaecker, Patrick and Goemaere, Jens and Kenter, Tobias and Riebler, Heinrich and Lass, Michael and Plessl, Christian}},
  note         = {{arXiv:2304.03039}},
  title        = {{{A computation of D(9) using FPGA Supercomputing}}},
  year         = {{2023}},
}

@article{45361,
  abstract     = {{The non-orthogonal local submatrix method applied to electronic structure–based molecular dynamics simulations is shown to exceed 1.1 EFLOP/s in FP16/FP32-mixed floating-point arithmetic when using 4400 NVIDIA A100 GPUs of the Perlmutter system. This is enabled by a modification of the original method that pushes the sustained fraction of the peak performance to about 80%. Example calculations are performed for SARS-CoV-2 spike proteins with up to 83 million atoms.}},
  author       = {{Schade, Robert and Kenter, Tobias and Elgabarty, Hossam and Lass, Michael and Kühne, Thomas and Plessl, Christian}},
  issn         = {{1094-3420}},
  journal      = {{The International Journal of High Performance Computing Applications}},
  keywords     = {{Hardware and Architecture, Theoretical Computer Science, Software}},
  publisher    = {{SAGE Publications}},
  title        = {{{Breaking the exascale barrier for the electronic structure problem in ab-initio molecular dynamics}}},
  doi          = {{10.1177/10943420231177631}},
  year         = {{2023}},
}

@phdthesis{32414,
  author       = {{Lass, Michael}},
  school       = {{Universität Paderborn}},
  title        = {{{Bringing Massive Parallelism and Hardware Acceleration to Linear Scaling Density Functional Theory Through Targeted Approximations}}},
  doi          = {{10.17619/UNIPB/1-1281}},
  year         = {{2022}},
}

@article{33684,
  author       = {{Schade, Robert and Kenter, Tobias and Elgabarty, Hossam and Lass, Michael and Schütt, Ole and Lazzaro, Alfio and Pabst, Hans and Mohr, Stephan and Hutter, Jürg and Kühne, Thomas and Plessl, Christian}},
  issn         = {{0167-8191}},
  journal      = {{Parallel Computing}},
  keywords     = {{Artificial Intelligence, Computer Graphics and Computer-Aided Design, Computer Networks and Communications, Hardware and Architecture, Theoretical Computer Science, Software}},
  publisher    = {{Elsevier BV}},
  title        = {{{Towards electronic structure-based ab-initio molecular dynamics simulations with hundreds of millions of atoms}}},
  doi          = {{10.1016/j.parco.2022.102920}},
  volume       = {{111}},
  year         = {{2022}},
}

@article{16277,
  abstract     = {{CP2K is an open source electronic structure and molecular dynamics software package to perform atomistic simulations of solid-state, liquid, molecular, and biological systems. It is especially aimed at massively parallel and linear-scaling electronic structure methods and state-of-the-art ab initio molecular dynamics simulations. Excellent performance for electronic structure calculations is achieved using novel algorithms implemented for modern high-performance computing systems. This review revisits the main capabilities of CP2K to perform efficient and accurate electronic structure simulations. The emphasis is put on density functional theory and multiple post–Hartree–Fock methods using the Gaussian and plane wave approach and its augmented all-electron extension.}},
  author       = {{Kühne, Thomas and Iannuzzi, Marcella and Del Ben, Mauro and Rybkin, Vladimir V. and Seewald, Patrick and Stein, Frederick and Laino, Teodoro and Khaliullin, Rustam Z. and Schütt, Ole and Schiffmann, Florian and Golze, Dorothea and Wilhelm, Jan and Chulkov, Sergey and Bani-Hashemian, Mohammad Hossein and Weber, Valéry and Borstnik, Urban and Taillefumier, Mathieu and Jakobovits, Alice Shoshana and Lazzaro, Alfio and Pabst, Hans and Müller, Tiziano and Schade, Robert and Guidon, Manuel and Andermatt, Samuel and Holmberg, Nico and Schenter, Gregory K. and Hehn, Anna and Bussy, Augustin and Belleflamme, Fabian and Tabacchi, Gloria and Glöß, Andreas and Lass, Michael and Bethune, Iain and Mundy, Christopher J. and Plessl, Christian and Watkins, Matt and VandeVondele, Joost and Krack, Matthias and Hutter, Jürg}},
  journal      = {{The Journal of Chemical Physics}},
  number       = {{19}},
  title        = {{{CP2K: An electronic structure and molecular dynamics software package - Quickstep: Efficient and accurate electronic structure calculations}}},
  doi          = {{10.1063/5.0007045}},
  volume       = {{152}},
  year         = {{2020}},
}

@inproceedings{16898,
  abstract     = {{Electronic structure calculations based on density-functional theory (DFT)
represent a significant part of today's HPC workloads and pose high demands on
high-performance computing resources. To perform these quantum-mechanical DFT
calculations on complex large-scale systems, so-called linear scaling methods
instead of conventional cubic scaling methods are required. In this work, we
take up the idea of the submatrix method and apply it to the DFT computations
in the software package CP2K. For that purpose, we transform the underlying
numeric operations on distributed, large, sparse matrices into computations on
local, much smaller and nearly dense matrices. This allows us to exploit the
full floating-point performance of modern CPUs and to make use of dedicated
accelerator hardware, where performance has been limited by memory bandwidth
before. We demonstrate both functionality and performance of our implementation
and show how it can be accelerated with GPUs and FPGAs.}},
  author       = {{Lass, Michael and Schade, Robert and Kühne, Thomas and Plessl, Christian}},
  booktitle    = {{Proc. International Conference for High Performance Computing, Networking, Storage and Analysis (SC)}},
  location     = {{Atlanta, GA, US}},
  pages        = {{1127--1140}},
  publisher    = {{IEEE Computer Society}},
  title        = {{{A Submatrix-Based Method for Approximate Matrix Function Evaluation in the Quantum Chemistry Code CP2K}}},
  doi          = {{10.1109/SC41405.2020.00084}},
  year         = {{2020}},
}

@article{12878,
  abstract     = {{In scientific computing, the acceleration of atomistic computer simulations by means of custom hardware is finding ever-growing application. A major limitation, however, is that the high efficiency in terms of performance and low power consumption entails the massive usage of low precision computing units. Here, based on the approximate computing paradigm, we present an algorithmic method to compensate for numerical inaccuracies due to low accuracy arithmetic operations rigorously, yet still obtaining exact expectation values using a properly modified Langevin-type equation.}},
  author       = {{Rengaraj, Varadarajan and Lass, Michael and Plessl, Christian and Kühne, Thomas}},
  journal      = {{Computation}},
  number       = {{2}},
  publisher    = {{MDPI}},
  title        = {{{Accurate Sampling with Noisy Forces from Approximate Computing}}},
  doi          = {{10.3390/computation8020039}},
  volume       = {{8}},
  year         = {{2020}},
}

@article{21,
  abstract     = {{We address the general mathematical problem of computing the inverse p-th
root of a given matrix in an efficient way. A new method to construct iteration
functions that allow calculating arbitrary p-th roots and their inverses of
symmetric positive definite matrices is presented. We show that the order of
convergence is at least quadratic and that adaptively adjusting a parameter q
always leads to an even faster convergence. In this way, a better performance
than with previously known iteration schemes is achieved. The efficiency of the
iterative functions is demonstrated for various matrices with different
densities, condition numbers and spectral radii.}},
  author       = {{Richters, Dorothee and Lass, Michael and Walther, Andrea and Plessl, Christian and Kühne, Thomas}},
  journal      = {{Communications in Computational Physics}},
  number       = {{2}},
  pages        = {{564--585}},
  publisher    = {{Global Science Press}},
  title        = {{{A General Algorithm to Calculate the Inverse Principal p-th Root of Symmetric Positive Definite Matrices}}},
  doi          = {{10.4208/cicp.OA-2018-0053}},
  volume       = {{25}},
  year         = {{2019}},
}

@article{20,
  abstract     = {{Approximate computing has shown to provide new ways to improve performance
and power consumption of error-resilient applications. While many of these
applications can be found in image processing, data classification or machine
learning, we demonstrate its suitability to a problem from scientific
computing. Utilizing the self-correcting behavior of iterative algorithms, we
show that approximate computing can be applied to the calculation of inverse
matrix p-th roots which are required in many applications in scientific
computing. Results show great opportunities to reduce the computational effort
and bandwidth required for the execution of the discussed algorithm, especially
when targeting special accelerator hardware.}},
  author       = {{Lass, Michael and Kühne, Thomas and Plessl, Christian}},
  issn         = {{1943-0671}},
  journal      = {{Embedded Systems Letters}},
  number       = {{2}},
  pages        = {{33--36}},
  publisher    = {{IEEE}},
  title        = {{{Using Approximate Computing for the Calculation of Inverse Matrix p-th Roots}}},
  doi          = {{10.1109/LES.2017.2760923}},
  volume       = {{10}},
  year         = {{2018}},
}

@inproceedings{1590,
  abstract     = {{We present the submatrix method, a highly parallelizable method for the approximate calculation of inverse p-th roots of large sparse symmetric matrices which are required in different scientific applications. Following the idea of Approximate Computing, we allow imprecision in the final result in order to utilize the sparsity of the input matrix and to allow massively parallel execution. For an n x n matrix, the proposed algorithm allows to distribute the calculations over n nodes with only little communication overhead. The result matrix exhibits the same sparsity pattern as the input matrix, allowing for efficient reuse of allocated data structures.

We evaluate the algorithm with respect to the error that it introduces into calculated results, as well as its performance and scalability. We demonstrate that the error is relatively limited for well-conditioned matrices and that results are still valuable for error-resilient applications like preconditioning even for ill-conditioned matrices. We discuss the execution time and scaling of the algorithm on a theoretical level and present a distributed implementation of the algorithm using MPI and OpenMP. We demonstrate the scalability of this implementation by running it on a high-performance compute cluster comprised of 1024 CPU cores, showing a speedup of 665x compared to single-threaded execution.}},
  author       = {{Lass, Michael and Mohr, Stephan and Wiebeler, Hendrik and Kühne, Thomas and Plessl, Christian}},
  booktitle    = {{Proc. Platform for Advanced Scientific Computing (PASC) Conference}},
  isbn         = {{978-1-4503-5891-0}},
  keywords     = {{approximate computing, linear algebra, matrix inversion, matrix p-th roots, numeric algorithm, parallel computing}},
  location     = {{Basel, Switzerland}},
  publisher    = {{ACM}},
  title        = {{{A Massively Parallel Algorithm for the Approximate Calculation of Inverse p-th Roots of Large Sparse Matrices}}},
  doi          = {{10.1145/3218176.3218231}},
  year         = {{2018}},
}

@article{18,
  abstract     = {{Branch and bound (B&B) algorithms structure the search space as a tree and eliminate infeasible solutions early by pruning subtrees that cannot lead to a valid or optimal solution. Custom hardware designs significantly accelerate the execution of these algorithms. In this article, we demonstrate a high-performance B&B implementation on FPGAs. First, we identify general elements of B&B algorithms and describe their implementation as a finite state machine. Then, we introduce workers that autonomously cooperate using work stealing to allow parallel execution and full utilization of the target FPGA. Finally, we explore advantages of instance-specific designs that target a specific problem instance to improve performance.

We evaluate our concepts by applying them to a branch and bound problem, the reconstruction of corrupted AES keys obtained from cold-boot attacks. The evaluation shows that our work stealing approach is scalable with the available resources and provides speedups proportional to the number of workers. Instance-specific designs allow us to achieve an overall speedup of 47 × compared to the fastest implementation of AES key reconstruction so far. Finally, we demonstrate how instance-specific designs can be generated just-in-time such that the provided speedups outweigh the additional time required for design synthesis.}},
  author       = {{Riebler, Heinrich and Lass, Michael and Mittendorf, Robert and Löcke, Thomas and Plessl, Christian}},
  issn         = {{1936-7406}},
  journal      = {{ACM Transactions on Reconfigurable Technology and Systems}},
  keywords     = {{coldboot}},
  number       = {{3}},
  pages        = {{24:1--24:23}},
  publisher    = {{Association for Computing Machinery (ACM)}},
  title        = {{{Efficient Branch and Bound on FPGAs Using Work Stealing and Instance-Specific Designs}}},
  doi          = {{10.1145/3053687}},
  volume       = {{10}},
  year         = {{2017}},
}

@inproceedings{19,
  abstract     = {{Version Control Systems (VCS) are a valuable tool for software development
and document management. Both client/server and distributed (Peer-to-Peer)
models exist, with the latter (e.g., Git and Mercurial) becoming
increasingly popular. Their distributed nature introduces complications,
especially concerning security: it is hard to control the dissemination of
contents stored in distributed VCS as they rely on replication of complete
repositories to any involved user.

We overcome this issue by designing and implementing a concept for
cryptography-enforced access control which is transparent to the user. Use
of field-tested schemes (end-to-end encryption, digital signatures) allows
for strong security, while adoption of convergent encryption and
content-defined chunking retains storage efficiency. The concept is
seamlessly integrated into Mercurial---respecting its distributed storage
concept---to ensure practical usability and compatibility to existing
deployments.}},
  author       = {{Lass, Michael and Leibenger, Dominik and Sorge, Christoph}},
  booktitle    = {{Proc. 41st Conference on Local Computer Networks (LCN)}},
  isbn         = {{978-1-5090-2054-6}},
  keywords     = {{access control, distributed version control systems, mercurial, peer-to-peer, convergent encryption, confidentiality, authenticity}},
  publisher    = {{IEEE}},
  title        = {{{Confidentiality and Authenticity for Distributed Version Control Systems - A Mercurial Extension}}},
  doi          = {{10.1109/lcn.2016.11}},
  year         = {{2016}},
}

@inproceedings{25,
  author       = {{Lass, Michael and Kühne, Thomas and Plessl, Christian}},
  booktitle    = {{Workshop on Approximate Computing (AC)}},
  title        = {{{Using Approximate Computing in Scientific Codes}}},
  year         = {{2016}},
}

@misc{1794,
  abstract     = {{Demands for computational power and energy efficiency of computing devices are steadily increasing. At the same time, following classic methods to increase speed and reduce energy consumption of these devices becomes increasingly difficult, bringing alternative methods into focus. One of these methods is approximate computing which utilizes the fact that small errors in computations are acceptable in many applications in order to allow acceleration of these computations or to increase energy efficiency. This thesis develops elements of a workflow that can be followed to apply approximate computing to existing applications. It proposes a novel heuristic approach to the localization of code paths that are suitable to approximate computing based on findings in recent research. Additionally, an approach to identification of approximable instructions within these code paths is proposed and used to implement simulation of approximation. The parts of the workflow are implemented with the goal to lay the foundation for a partly automated toolflow. Evaluation of the developed techniques shows that the proposed methods can help providing a convenient workflow, facilitating the first steps into the application of approximate computing.}},
  author       = {{Lass, Michael}},
  publisher    = {{Paderborn University}},
  title        = {{{Localization and Analysis of Code Paths Suitable for Acceleration using Approximate Computing}}},
  year         = {{2015}},
}

@misc{1795,
  abstract     = {{Distributed revision control is widespread throughout the software industry. Systems like git and mercurial gained a lot of users over the last years and started to supersede central systems like Subversion or CVS in some projects. While restricting access to those central systems is basically possible, it is difficult to control the propagation of contents in a distributed revision control system because every user has a local copy of the whole repository. In this thesis a concept is developed and implemented that allows secure storage of confidential data in a distributed revision control system and enables users to manage read and write permissions on single confidential files. Therefore different cryptographic methods are used, such as asymmetric encryption, digital signatures and convergent encryption. These techniques are applied in a manner that fits the special requirements of a revision control system and allows a space efficient storage of changes to the encrypted files.}},
  author       = {{Lass, Michael}},
  publisher    = {{Paderborn University}},
  title        = {{{Sichere Speicherung vertraulicher Daten in verteilten Versionskontrollsystemen}}},
  year         = {{2013}},
}

