@inbook{61210,
  abstract     = {{Knowledge graphs (KGs) differ significantly over multiple different versions of the same data source. They also often contain blank nodes that do not have a constant identifier over all versions. Linking such blank nodes from different versions is a challenging task. Previous works propose different approaches to create signatures for all blank nodes based on named nodes in their neighborhood to match blank nodes with similar signatures. However, these works struggle to find a good mapping when the difference between the KGs’ versions grows too large. In this work, we propose Blink, an embedding-based approach for blank node linking. Blink merges two KGs’ versions and embeds the merged graph into a latent vector space based on translational embeddings and subsequently matches the closest pairs of blank nodes from different graphs. We evaluate our approach using real-world datasets against state-of-the-art approaches by computing the blank node matching for isomorphic graphs and graphs that contain triple changes (i.e., added or removed triples). The results indicate that Blink achieves perfect accuracy for isomorphic graphs. For graph versions that contain changes, such as having up to 20% of triples removed in one version, Blink still produces a mapping with an Optimal Mapping Deviation Ratio of under 1%. These results show that Blink leads to a better linking of KGs over different versions and similar graphs adhering to the linked data guidelines.}},
  author       = {{Becker, Alexander and Sherif, Mohamed and Ngonga Ngomo, Axel-Cyrille}},
  booktitle    = {{Lecture Notes in Computer Science}},
  isbn         = {{9783031778438}},
  issn         = {{0302-9743}},
  location     = {{Baltimore, USA}},
  publisher    = {{Springer Nature Switzerland}},
  title        = {{{Blink: Blank Node Matching Using Embeddings}}},
  doi          = {{10.1007/978-3-031-77844-5_12}},
  year         = {{2024}},
}

@inproceedings{54084,
  author       = {{Karalis, Nikolaos and Bigerl, Alexander and Heidrich, Liss and Sherif, Mohamed and Ngonga Ngomo, Axel-Cyrille}},
  booktitle    = {{ESWC}},
  keywords     = {{bigerl dice enexa heidrich karalis ngonga sail sherif}},
  title        = {{{Efficient Evaluation of Conjunctive Regular Path Queries Using Multi-way Joins}}},
  year         = {{2024}},
}

@inproceedings{61219,
  author       = {{Kumar, Ajay and Naumann, Marius and Henne, Kevin and Sherif, Mohamed}},
  booktitle    = {{Joint Proceedings of Posters, Demos, Workshops, and Tutorials of the 20th International Conference on Semantic Systems co-located with 20th International Conference on Semantic Systems (SEMANTiCS 2024), Amsterdam, The Netherlands, September 17-19, 2024}},
  editor       = {{Garijo, Daniel and Gentile, Anna Lisa and Kurteva, Anelia and Mannocci, Andrea and Osborne, Francesco and Vahdati, Sahar}},
  keywords     = {{kumar sherif enexa climatebowl ingrid simba dice whale}},
  location     = {{Amsterdam, The Netherlands}},
  publisher    = {{CEUR-WS.org}},
  title        = {{{PCFWebUI: Data-driven WebUI for holistic decarbonization based on PCF-Tracking}}},
  volume       = {{3759}},
  year         = {{2024}},
}

@inproceedings{55094,
  author       = {{Zahera, Hamada Mohamed Abdelsamee and Manzoor, Ali and Sherif, Mohamed and Moussallem, Diego and Ngonga Ngomo, Axel-Cyrille}},
  booktitle    = {{SEMANTiCS}},
  keywords     = {{TRR318 climatebowl colide dice enexa kiam manzoor moussallem ngonga sailproject sherif simba zahera}},
  title        = {{{Generating SPARQL from Natural Language Using Chain-of-Thoughts Prompting}}},
  year         = {{2024}},
}

@inproceedings{58722,
  abstract     = {{Dialects introduce syntactic and lexical variations in language that occur in regional or social groups. Most NLP methods are not sensitive to such variations. This may lead to unfair behavior of the methods, conveying negative bias towards dialect speakers. While previous work has studied dialect-related fairness for aspects like hate speech, other aspects of biased language, such as lewdness, remain fully unexplored. To fill this gap, we investigate performance disparities between dialects in the detection of five aspects of biased language and how to mitigate them. To alleviate bias, we present a multitask learning approach that models dialect language as an auxiliary task to incorporate syntactic and lexical variations. In our experiments with African-American English dialect, we provide empirical evidence that complementing common learning approaches with dialect modeling improves their fairness. Furthermore, the results suggest that multitask learning achieves state-of-the-art performance and helps to detect properties of biased language more reliably.}},
  author       = {{Spliethöver, Maximilian and Menon, Sai Nikhil and Wachsmuth, Henning}},
  booktitle    = {{Findings of the Association for Computational Linguistics: ACL 2024}},
  editor       = {{Ku, Lun-Wei and Martins, Andre and Srikumar, Vivek}},
  pages        = {{9294--9313}},
  publisher    = {{Association for Computational Linguistics}},
  title        = {{{Disentangling Dialect from Social Bias via Multitask Learning to Improve Fairness}}},
  doi          = {{10.18653/v1/2024.findings-acl.553}},
  year         = {{2024}},
}

@inbook{61868,
  author       = {{Jonas-Ahrend, Gabriela}},
  booktitle    = {{A Pedagogical View of the COVID-19 Pandemic}},
  isbn         = {{9789004710139}},
  publisher    = {{BRILL}},
  title        = {{{Vocational Teacher Education in a “COVID-19 Semester”}}},
  doi          = {{10.1163/9789004710146_009}},
  year         = {{2024}},
}

@inbook{62067,
  abstract     = {{Most FPGA boards in the HPC domain are well-suited for parallel scaling because of the direct integration of versatile and high-throughput network ports. However, the utilization of their network capabilities is often challenging and error-prone because the whole network stack and communication patterns have to be implemented and managed on the FPGAs. Also, this approach conceptually involves a trade-off between the performance potential of improved communication and the impact of resource consumption for communication infrastructure, since the utilized resources on the FPGAs could otherwise be used for computations. In this work, we investigate this trade-off, firstly, by using synthetic benchmarks to evaluate the different configuration options of the communication framework ACCL and their impact on communication latency and throughput. Finally, we use our findings to implement a shallow water simulation whose scalability heavily depends on low-latency communication. With a suitable configuration of ACCL, good scaling behavior can be shown to all 48 FPGAs installed in the system. Overall, the results show that the availability of inter-FPGA communication frameworks as well as the configurability of framework and network stack are crucial to achieve the best application performance with low latency communication.}},
  author       = {{Meyer, Marius and Kenter, Tobias and Petrica, Lucian and O’Brien, Kenneth and Blott, Michaela and Plessl, Christian}},
  booktitle    = {{Lecture Notes in Computer Science}},
  isbn         = {{9783031697654}},
  issn         = {{0302-9743}},
  publisher    = {{Springer Nature Switzerland}},
  title        = {{{Optimizing Communication for Latency Sensitive HPC Applications on up to 48 FPGAs Using ACCL}}},
  doi          = {{10.1007/978-3-031-69766-1_9}},
  year         = {{2024}},
}

@article{56604,
  abstract     = {{This manuscript makes the claim of having computed the 9th Dedekind number, D(9). This was done by accelerating the core operation of the process with an efficient FPGA design that outperforms an optimized 64-core CPU reference by 95x. The FPGA execution was parallelized on the Noctua 2 supercomputer at Paderborn University. The resulting value for D(9) is 286386577668298411128469151667598498812366. This value can be verified in two steps. We have made the data file containing the 490 M results available, each of which can be verified separately on CPU, and the whole file sums to our proposed value. The paper explains the mathematical approach in the first part, before putting the focus on a deep dive into the FPGA accelerator implementation followed by a performance analysis. The FPGA implementation was done in Register-Transfer Level using a dual-clock architecture and shows how we achieved an impressive FMax of 450 MHz on the targeted Stratix 10 GX 2,800 FPGAs. The total compute time used was 47,000 FPGA hours.}},
  author       = {{Van Hirtum, Lennart and De Causmaecker, Patrick and Goemaere, Jens and Kenter, Tobias and Riebler, Heinrich and Lass, Michael and Plessl, Christian}},
  issn         = {{1936-7406}},
  journal      = {{ACM Transactions on Reconfigurable Technology and Systems}},
  number       = {{3}},
  pages        = {{1--28}},
  publisher    = {{Association for Computing Machinery (ACM)}},
  title        = {{{A Computation of the Ninth Dedekind Number Using FPGA Supercomputing}}},
  doi          = {{10.1145/3674147}},
  volume       = {{17}},
  year         = {{2024}},
}

@inproceedings{53503,
  author       = {{Olgu, Kaan and Kenter, Tobias and Nunez-Yanez, Jose and Mcintosh-Smith, Simon}},
  booktitle    = {{Proceedings of the 12th International Workshop on OpenCL and SYCL}},
  publisher    = {{ACM}},
  title        = {{{Optimisation and Evaluation of Breadth First Search with oneAPI/SYCL on Intel FPGAs: from Describing Algorithms to Describing Architectures}}},
  doi          = {{10.1145/3648115.3648134}},
  year         = {{2024}},
}

@inbook{57834,
  author       = {{Vernholz, Mats}},
  booktitle    = {{Jahrbuch der berufs- und wirtschaftspädagogischen Forschung 2024}},
  editor       = {{Kögler, Kristina and Kremer, H.-Hugo and Herkner, Volkmar}},
  pages        = {{132--147}},
  publisher    = {{Verlag Barbara Budrich}},
  title        = {{{Gewerblich-technische Lehrkräftebildung in Deutschland - Analyse der Einflüsse auf das akademische Selbstkonzept von Lehramtsstudierenden technischer (beruflicher) Fachrichtungen}}},
  doi          = {{10.3224/84743054}},
  year         = {{2024}},
}

@inbook{53942,
  abstract     = {{Since its inception two decades ago, Soot has become one of the most widely used open-source static analysis frameworks. Over time it has been extended with the contributions of countless researchers. Yet, at the same time, the requirements for Soot have changed over the years and become increasingly at odds with some of the major design decisions that underlie it. In this work, we thus present SootUp, a complete reimplementation of Soot that seeks to fulfill these requirements with a novel design, while at the same time keeping elements that Soot users have grown accustomed to.}},
  author       = {{Karakaya, Kadiray and Schott, Stefan and Klauke, Jonas and Bodden, Eric and Schmidt, Markus and Luo, Linghui and He, Dongjie}},
  booktitle    = {{Tools and Algorithms for the Construction and Analysis of Systems}},
  isbn         = {{9783031572456}},
  issn         = {{0302-9743}},
  publisher    = {{Springer Nature Switzerland}},
  title        = {{{SootUp: A Redesign of the Soot Static Analysis Framework}}},
  doi          = {{10.1007/978-3-031-57246-3_13}},
  year         = {{2024}},
}

@inproceedings{57550,
  author       = {{Schott, Stefan and Ponta, Serena Elisa and Fischer, Wolfram and Klauke, Jonas and Bodden, Eric}},
  booktitle    = {{38th European Conference on Object-Oriented Programming (ECOOP 2024)}},
  location     = {{Vienna}},
  title        = {{{Java Bytecode Normalization for Code Similarity Analysis}}},
  doi          = {{10.4230/LIPIcs.ECOOP.2024.37}},
  year         = {{2024}},
}

@inproceedings{58716,
  author       = {{Schott, Stefan and Fischer, Wolfram and Ponta, Serena Elisa and Klauke, Jonas and Bodden, Eric}},
  booktitle    = {{2024 IEEE International Conference on Software Maintenance and Evolution (ICSME)}},
  publisher    = {{IEEE}},
  title        = {{{Compilation of Commit Changes Within Java Source Code Repositories}}},
  doi          = {{10.1109/icsme58944.2024.00038}},
  year         = {{2024}},
}

@misc{59223,
  author       = {{Schwabe, Tobias and Mallick, Khaleda and Singh, Karanveer and Schneider, Thomas and Scheytt, J. Christoph}},
  publisher    = {{Zenodo}},
  title        = {{{Precise optical Nyquist Pulse Synthesizer Digital-to-Analog-Converter presentation 2024 SPP 2111}}},
  doi          = {{10.5281/zenodo.15114897}},
  year         = {{2024}},
}

@misc{59224,
  author       = {{Schwabe, Tobias and Singh, Karanveer and Schneider, Thomas and Scheytt, J. Christoph}},
  publisher    = {{Zenodo}},
  title        = {{{Precise optical Nyquist Pulse Synthesizer Digital-to-Analog-Converter (PONyDAC II) 2024 SPP 2111}}},
  doi          = {{10.5281/zenodo.15114631}},
  year         = {{2024}},
}

@inproceedings{57103,
  author       = {{Surendranath Shroff, Vijayalakshmi and Bahmanian, Meysam and Kruse, Stephan and Scheytt, J. Christoph}},
  booktitle    = {{2024 IEEE BiCMOS and Compound Semiconductor Integrated Circuits and Technology Symposium (BCICTS)}},
  location     = {{Fort Lauderdale, Florida}},
  publisher    = {{IEEE}},
  title        = {{{Design of an Ultra-Low Phase Noise Broadband Amplifier in 130 nm SiGe BiCMOS Technology}}},
  doi          = {{10.1109/BCICTS59662.2024.10745663}},
  year         = {{2024}},
}

@inproceedings{57160,
  abstract     = {{Large audio tagging models are usually trained or pre-trained on AudioSet, a dataset that encompasses a large amount of different sound classes and acoustic environments. Knowledge distillation has emerged as a method to compress such models without compromising their effectiveness. There are many different applications for audio tagging, some of which require a specialization to a narrow domain of sounds to be classified. For these scenarios, it is beneficial to distill the large audio tagger with respect to a specific subset of sounds of interest. A method to prune a general dataset with respect to a target dataset is presented. By distilling with such a specialized pruned dataset, we obtain a compressed model with better classification accuracy in the specific target domain than with target-agnostic distillation.}},
  author       = {{Werning, Alexander and Haeb-Umbach, Reinhold}},
  booktitle    = {{32nd European Signal Processing Conference (EUSIPCO 2024)}},
  keywords     = {{data pruning, knowledge distillation, audio tagging}},
  location     = {{Lyon}},
  title        = {{{Target-Specific Dataset Pruning for Compression of Audio Tagging Models}}},
  year         = {{2024}},
}

@inbook{62702,
  abstract     = {{Clifford algebras are a natural extension of division algebras, including real numbers, complex numbers, quaternions, and octonions. Previous research in knowledge graph embeddings has focused exclusively on Clifford algebras of a specific type, which do not include nilpotent base vectors—elements that square to zero. In this work, we introduce a novel approach by incorporating nilpotent base vectors with a nilpotency index of two, leading to a more general form of Clifford algebras named degenerate Clifford algebras. This generalization to degenerate Clifford algebras does allow for covering dual numbers and as such include translations and rotations models under the same generalization paradigm for the first time. We develop two models to determine the parameters that define the algebra: one using a greedy search and another predicting the parameters based on neural network embeddings of the input knowledge graph. Our evaluation on seven benchmark datasets demonstrates that this incorporation of nilpotent vectors enhances the quality of embeddings. Additionally, our method outperforms state-of-the-art approaches in terms of generalization, particularly regarding the mean reciprocal rank achieved on validation data. Finally, we show that even a simple greedy search can effectively discover optimal or near-optimal parameters for the algebra.}},
  author       = {{Kamdem Teyou, Louis Mozart and Demir, Caglar and Ngonga Ngomo, Axel-Cyrille}},
  booktitle    = {{Frontiers in Artificial Intelligence and Applications}},
  isbn         = {{9781643685489}},
  issn         = {{0922-6389}},
  location     = {{Santiago de Compostela}},
  publisher    = {{IOS Press}},
  title        = {{{Embedding Knowledge Graphs in Degenerate Clifford Algebras}}},
  doi          = {{10.3233/faia240627}},
  year         = {{2024}},
}

@inproceedings{62703,
  abstract     = {{We introduce a novel embedding method diverging from conventional approaches by operating within function spaces of finite dimension rather than finite vector space, thus departing significantly from standard knowledge graph embedding techniques. Initially employing polynomial functions to compute embeddings, we progress to more intricate representations using neural networks with varying layer complexities. We argue that employing functions for embedding computation enhances expressiveness and allows for more degrees of freedom, enabling operations such as composition, derivatives and primitive of entities representation. Additionally, we meticulously outline the step-by-step construction of our approach and provide code for reproducibility, thereby facilitating further exploration and application in the field.}},
  author       = {{Kamdem Teyou, Louis Mozart and Demir, Caglar and Ngonga Ngomo, Axel-Cyrille}},
  booktitle    = {{Proceedings of the 33rd ACM International Conference on Information and Knowledge Management}},
  location     = {{Boise}},
  publisher    = {{ACM}},
  title        = {{{Embedding Knowledge Graphs in Function Spaces}}},
  doi          = {{10.1145/3627673.3679819}},
  year         = {{2024}},
}

@inproceedings{56863,
  author       = {{Schiebel, Fabian Benedikt and Sattler, Florian and Schubert, Philipp Dominik and Apel, Sven and Bodden, Eric}},
  booktitle    = {{38th European Conference on Object-Oriented Programming (ECOOP 2024)}},
  editor       = {{Aldrich, Jonathan and Salvaneschi, Guido}},
  isbn         = {{978-3-95977-341-6}},
  issn         = {{1868-8969}},
  pages        = {{36:1--36:28}},
  publisher    = {{Schloss Dagstuhl – Leibniz-Zentrum für Informatik}},
  title        = {{{Scaling Interprocedural Static Data-Flow Analysis to Large C/C++ Applications: An Experience Report}}},
  doi          = {{10.4230/LIPIcs.ECOOP.2024.36}},
  volume       = {{313}},
  year         = {{2024}},
}

