@inproceedings{54623,
  author       = {Papenkordt, Jörg},
  booktitle    = {Artificial Intelligence in HCI},
  isbn         = {9783031606052},
  issn         = {0302-9743},
  publisher    = {Springer Nature Switzerland},
  title        = {Navigating Transparency: The Influence of On-demand Explanations on Non-expert User Interaction with {AI}},
  doi          = {10.1007/978-3-031-60606-9_14},
  year         = {2024},
}

@inbook{57323,
  author       = {Karalis, Nikolaos and Bigerl, Alexander and Demir, Caglar and Heidrich, Liss and Ngonga Ngomo, Axel-Cyrille},
  booktitle    = {Lecture Notes in Computer Science},
  isbn         = {9783031703645},
  issn         = {0302-9743},
  publisher    = {Springer Nature Switzerland},
  title        = {Evaluating Negation with Multi-way Joins Accelerates Class Expression Learning},
  doi          = {10.1007/978-3-031-70365-2_12},
  year         = {2024},
}

@inbook{61210,
  abstract     = {Knowledge graphs (KGs) differ significantly over multiple different versions of the same data source. They also often contain blank nodes that do not have a constant identifier over all versions. Linking such blank nodes from different versions is a challenging task. Previous works propose different approaches to create signatures for all blank nodes based on named nodes in their neighborhood to match blank nodes with similar signatures. However, these works struggle to find a good mapping when the difference between the KGs' versions grows too large. In this work, we propose Blink, an embedding-based approach for blank node linking. Blink merges two KGs' versions and embeds the merged graph into a latent vector space based on translational embeddings and subsequently matches the closest pairs of blank nodes from different graphs. We evaluate our approach using real-world datasets against state-of-the-art approaches by computing the blank node matching for isomorphic graphs and graphs that contain triple changes (i.e., added or removed triples). The results indicate that Blink achieves perfect accuracy for isomorphic graphs. For graph versions that contain changes, such as having up to 20% of triples removed in one version, Blink still produces a mapping with an Optimal Mapping Deviation Ratio of under 1%. These results show that Blink leads to a better linking of KGs over different versions and similar graphs adhering to the linked data guidelines.},
  author       = {Becker, Alexander and Sherif, Mohamed and Ngonga Ngomo, Axel-Cyrille},
  booktitle    = {Lecture Notes in Computer Science},
  isbn         = {9783031778438},
  issn         = {0302-9743},
  location     = {Baltimore, USA},
  publisher    = {Springer Nature Switzerland},
  title        = {{Blink}: Blank Node Matching Using Embeddings},
  doi          = {10.1007/978-3-031-77844-5_12},
  year         = {2024},
}

@inbook{62067,
  abstract     = {Most FPGA boards in the HPC domain are well-suited for parallel scaling because of the direct integration of versatile and high-throughput network ports. However, the utilization of their network capabilities is often challenging and error-prone because the whole network stack and communication patterns have to be implemented and managed on the FPGAs. Also, this approach conceptually involves a trade-off between the performance potential of improved communication and the impact of resource consumption for communication infrastructure, since the utilized resources on the FPGAs could otherwise be used for computations. In this work, we investigate this trade-off, firstly, by using synthetic benchmarks to evaluate the different configuration options of the communication framework ACCL and their impact on communication latency and throughput. Finally, we use our findings to implement a shallow water simulation whose scalability heavily depends on low-latency communication. With a suitable configuration of ACCL, good scaling behavior can be shown to all 48 FPGAs installed in the system. Overall, the results show that the availability of inter-FPGA communication frameworks as well as the configurability of framework and network stack are crucial to achieve the best application performance with low latency communication.},
  author       = {Meyer, Marius and Kenter, Tobias and Petrica, Lucian and O'Brien, Kenneth and Blott, Michaela and Plessl, Christian},
  booktitle    = {Lecture Notes in Computer Science},
  isbn         = {9783031697654},
  issn         = {0302-9743},
  publisher    = {Springer Nature Switzerland},
  title        = {Optimizing Communication for Latency Sensitive {HPC} Applications on up to 48 {FPGAs} Using {ACCL}},
  doi          = {10.1007/978-3-031-69766-1_9},
  year         = {2024},
}

@inproceedings{53942,
  abstract     = {Since its inception two decades ago, Soot has become one of the most widely used open-source static analysis frameworks. Over time it has been extended with the contributions of countless researchers. Yet, at the same time, the requirements for Soot have changed over the years and become increasingly at odds with some of the major design decisions that underlie it. In this work, we thus present SootUp, a complete reimplementation of Soot that seeks to fulfill these requirements with a novel design, while at the same time keeping elements that Soot users have grown accustomed to.},
  author       = {Karakaya, Kadiray and Schott, Stefan and Klauke, Jonas and Bodden, Eric and Schmidt, Markus and Luo, Linghui and He, Dongjie},
  booktitle    = {Tools and Algorithms for the Construction and Analysis of Systems},
  isbn         = {9783031572456},
  issn         = {0302-9743},
  publisher    = {Springer Nature Switzerland},
  title        = {{SootUp}: A Redesign of the {Soot} Static Analysis Framework},
  doi          = {10.1007/978-3-031-57246-3_13},
  year         = {2024},
}

@inproceedings{47953,
  author       = {Kornowicz, Jaroslaw and Thommes, Kirsten},
  booktitle    = {Artificial Intelligence in HCI},
  isbn         = {9783031358906},
  issn         = {0302-9743},
  publisher    = {Springer Nature Switzerland},
  title        = {Aggregating Human Domain Knowledge for Feature Ranking},
  doi          = {10.1007/978-3-031-35891-3_7},
  year         = {2023},
}

@inproceedings{50479,
  abstract     = {Verifying assertions is an essential part of creating and maintaining knowledge graphs. Most often, this task cannot be carried out manually due to the sheer size of modern knowledge graphs. Hence, automatic fact-checking approaches have been proposed over the last decade. These approaches aim to compute automatically whether a given assertion is correct or incorrect. However, most fact-checking approaches are binary classifiers that fail to consider the volatility of some assertions, i.e., the fact that such assertions are only valid at certain times or for specific time intervals. Moreover, the few approaches able to predict when an assertion was valid (i.e., time-point prediction approaches) rely on manual feature engineering. This paper presents TEMPORALFC, a temporal fact-checking approach that uses multiple sources of background knowledge to assess the veracity and temporal validity of a given assertion. We evaluate TEMPORALFC on two datasets and compare it to the state of the art in fact-checking and time-point prediction. Our results suggest that TEMPORALFC outperforms the state of the art on the fact-checking task by 0.13 to 0.15 in terms of Area Under the Receiver Operating Characteristic curve and on the time-point prediction task by 0.25 to 0.27 in terms of Mean Reciprocal Rank. Our code is open-source and can be found at https://github.com/dice-group/TemporalFC.},
  author       = {Qudus, Umair and Röder, Michael and Kirrane, Sabrina and Ngonga Ngomo, Axel-Cyrille},
  booktitle    = {The Semantic Web -- ISWC 2023},
  editor       = {Payne, Terry R. and Presutti, Valentina and Qi, Guilin and Poveda-Villalón, María and Stoilos, Giorgos and Hollink, Laura and Kaoudi, Zoi and Cheng, Gong and Li, Juanzi},
  isbn         = {9783031472398},
  issn         = {0302-9743},
  keywords     = {temporal fact checking, ensemble learning, transfer learning, time-point prediction, temporal knowledge graphs},
  location     = {Athens, Greece},
  pages        = {465--483},
  publisher    = {Springer, Cham},
  title        = {{TemporalFC}: A Temporal Fact Checking Approach over Knowledge Graphs},
  doi          = {10.1007/978-3-031-47240-4_25},
  volume       = {14265},
  year         = {2023},
}

@inproceedings{51373,
  author       = {Hanselle, Jonas Manuel and Fürnkranz, Johannes and Hüllermeier, Eyke},
  booktitle    = {26th International Conference on Discovery Science},
  isbn         = {9783031452741},
  issn         = {0302-9743},
  location     = {Porto},
  pages        = {189--203},
  publisher    = {Springer Nature Switzerland},
  title        = {Probabilistic Scoring Lists for Interpretable Machine Learning},
  doi          = {10.1007/978-3-031-45275-8_13},
  volume       = {14050},
  internal-note = {Duplicate of entry 54909 (same DOI); consider merging and keeping this more complete record.},
  year         = {2023},
}

@inproceedings{47421,
  abstract     = {Class expression learning in description logics has long been regarded as an iterative search problem in an infinite conceptual space. Each iteration of the search process invokes a reasoner and a heuristic function. The reasoner finds the instances of the current expression, and the heuristic function computes the information gain and decides on the next step to be taken. As the size of the background knowledge base grows, search-based approaches for class expression learning become prohibitively slow. Current neural class expression synthesis (NCES) approaches investigate the use of neural networks for class expression learning in the attributive language with complement (ALC). While they show significant improvements over search-based approaches in runtime and quality of the computed solutions, they rely on the availability of pretrained embeddings for the input knowledge base. Moreover, they are not applicable to ontologies in more expressive description logics. In this paper, we propose a novel NCES approach which extends the state of the art to the description logic ALCHIQ(D). Our extension, dubbed NCES2, comes with an improved training data generator and does not require pretrained embeddings for the input knowledge base as both the embedding model and the class expression synthesizer are trained jointly. Empirical results on benchmark datasets suggest that our approach inherits the scalability capability of current NCES instances with the additional advantage that it supports more complex learning problems. NCES2 achieves the highest performance overall when compared to search-based approaches and to its predecessor NCES. We provide our source code, datasets, and pretrained models at https://github.com/dice-group/NCES2.},
  author       = {Kouagou, N'Dah Jean and Heindorf, Stefan and Demir, Caglar and Ngonga Ngomo, Axel-Cyrille},
  booktitle    = {Machine Learning and Knowledge Discovery in Databases: Research Track},
  isbn         = {9783031434204},
  issn         = {0302-9743},
  location     = {Turin},
  publisher    = {Springer Nature Switzerland},
  title        = {Neural Class Expression Synthesis in {ALCHIQ(D)}},
  doi          = {10.1007/978-3-031-43421-1_12},
  year         = {2023},
}

@inproceedings{46516,
  abstract     = {Linked knowledge graphs build the backbone of many data-driven applications such as search engines, conversational agents and e-commerce solutions. Declarative link discovery frameworks use complex link specifications to express the conditions under which a link between two resources can be deemed to exist. However, understanding such complex link specifications is a challenging task for non-expert users of link discovery frameworks. In this paper, we address this drawback by devising NMV-LS, a language model-based verbalization approach for translating complex link specifications into natural language. NMV-LS relies on the results of rule-based link specification verbalization to apply continuous training on T5, a large language model based on the Transformer architecture. We evaluated NMV-LS on English and German datasets using well-known machine translation metrics such as BLEU, METEOR, ChrF++ and TER. Our results suggest that our approach achieves a verbalization performance close to that of humans and outperforms state of the art approaches. Our source code and datasets are publicly available at https://github.com/dice-group/NMV-LS.},
  author       = {Ahmed, Abdullah Fathi Ahmed and Firmansyah, Asep Fajar and Sherif, Mohamed and Moussallem, Diego and Ngonga Ngomo, Axel-Cyrille},
  booktitle    = {Natural Language Processing and Information Systems},
  isbn         = {9783031353192},
  issn         = {0302-9743},
  publisher    = {Springer Nature Switzerland},
  title        = {Explainable Integration of Knowledge Graphs Using Large Language Models},
  doi          = {10.1007/978-3-031-35320-8_9},
  year         = {2023},
}

@inproceedings{54579,
  author       = {Mahmood, Yasir and Virtema, Jonni},
  booktitle    = {Logic, Language, Information, and Computation},
  isbn         = {9783031397837},
  issn         = {0302-9743},
  publisher    = {Springer Nature Switzerland},
  title        = {Parameterized Complexity of Propositional Inclusion and Independence Logic},
  doi          = {10.1007/978-3-031-39784-4_17},
  year         = {2023},
}

@inbook{54909,
  author       = {Hanselle, Jonas Manuel and Fürnkranz, Johannes and Hüllermeier, Eyke},
  booktitle    = {Discovery Science},
  isbn         = {9783031452741},
  issn         = {0302-9743},
  publisher    = {Springer Nature Switzerland},
  title        = {Probabilistic Scoring Lists for Interpretable Machine Learning},
  doi          = {10.1007/978-3-031-45275-8_13},
  internal-note = {Duplicate of entry 51373 (same DOI); 51373 carries pages/volume/location, so prefer it when merging.},
  year         = {2023},
}

@inproceedings{43395,
  author       = {Trentinaglia, Roman and Merschjohann, Sven and Fockel, Markus and Eikerling, Hendrik},
  booktitle    = {REFSQ 2023: Requirements Engineering: Foundation for Software Quality},
  isbn         = {9783031297854},
  issn         = {0302-9743},
  publisher    = {Springer Nature Switzerland},
  title        = {Eliciting Security Requirements -- An Experience Report},
  doi          = {10.1007/978-3-031-29786-1_25},
  year         = {2023},
}

@inbook{44769,
  author       = {Castenow, Jannik and Harbig, Jonas and Meyer auf der Heide, Friedhelm},
  booktitle    = {Lecture Notes in Computer Science},
  isbn         = {9783031304477},
  issn         = {0302-9743},
  publisher    = {Springer International Publishing},
  title        = {Unifying Gathering Protocols for Swarms of Mobile Robots},
  doi          = {10.1007/978-3-031-30448-4_1},
  year         = {2023},
}

@inbook{46867,
  author       = {Dieter, Peter},
  booktitle    = {Lecture Notes in Computer Science},
  isbn         = {9783031436116},
  issn         = {0302-9743},
  publisher    = {Springer Nature Switzerland},
  title        = {A Regret Policy for the Dynamic Vehicle Routing Problem with Time Windows},
  doi          = {10.1007/978-3-031-43612-3_14},
  year         = {2023},
}

@inbook{57221,
  author       = {Babu, Sandeep and Jegarian, Majid and Fischer, Dirk and Mertsching, Bärbel},
  booktitle    = {Lecture Notes in Computer Science},
  isbn         = {9783031433597},
  issn         = {0302-9743},
  publisher    = {Springer Nature Switzerland},
  title        = {Fast {3D} Semantic Segmentation Using a Self Attention Network and Random Sampling},
  doi          = {10.1007/978-3-031-43360-3_21},
  year         = {2023},
}

@inproceedings{46572,
  abstract     = {Indonesian is classified as underrepresented in the Natural Language Processing (NLP) field, despite being the tenth most spoken language in the world with 198 million speakers. The paucity of datasets is recognized as the main reason for the slow advancements in NLP research for underrepresented languages. Significant attempts were made in 2020 to address this drawback for Indonesian. The Indonesian Natural Language Understanding (IndoNLU) benchmark was introduced alongside IndoBERT pre-trained language model. The second benchmark, Indonesian Language Evaluation Montage (IndoLEM), was presented in the same year. These benchmarks support several tasks, including Named Entity Recognition (NER). However, all NER datasets are in the public domain and do not contain domain-specific datasets. To alleviate this drawback, we introduce IndQNER, a manually annotated NER benchmark dataset in the religious domain that adheres to a meticulously designed annotation guideline. Since Indonesia has the world's largest Muslim population, we build the dataset from the Indonesian translation of the Quran. The dataset includes 2475 named entities representing 18 different classes. To assess the annotation quality of IndQNER, we perform experiments with BiLSTM and CRF-based NER, as well as IndoBERT fine-tuning. The results reveal that the first model outperforms the second model achieving 0.98 F1 points. This outcome indicates that IndQNER may be an acceptable evaluation metric for Indonesian NER tasks in the aforementioned domain, widening the research's domain range.},
  author       = {Gusmita, Ria Hari and Firmansyah, Asep Fajar and Moussallem, Diego and Ngonga Ngomo, Axel-Cyrille},
  booktitle    = {Natural Language Processing and Information Systems},
  isbn         = {9783031353192},
  issn         = {0302-9743},
  keywords     = {NER benchmark dataset, Indonesian, specific domain},
  location     = {Derby, UK},
  publisher    = {Springer Nature Switzerland},
  title        = {{IndQNER}: Named Entity Recognition Benchmark Dataset from the {Indonesian} Translation of the {Quran}},
  doi          = {10.1007/978-3-031-35320-8_12},
  year         = {2023},
}

@inproceedings{52859,
  author       = {de Camargo e Souza Câmara, Igor and Turhan, Anni-Yasmin},
  booktitle    = {Logics in Artificial Intelligence},
  isbn         = {9783031436185},
  issn         = {0302-9743},
  publisher    = {Springer Nature Switzerland},
  title        = {Deciding Subsumption in Defeasible {$\mathcal{ELI}_\bot$} with Typicality Models},
  doi          = {10.1007/978-3-031-43619-2_36},
  year         = {2023},
}

@inproceedings{54588,
  author       = {Manzoor, Ali and Saleem, Muhammad and Moussallem, Diego and Sherif, Mohamed and Ngonga Ngomo, Axel-Cyrille},
  booktitle    = {The Semantic Web},
  isbn         = {9783031334542},
  issn         = {0302-9743},
  publisher    = {Springer Nature Switzerland},
  title        = {{RELD}: A Knowledge Graph of Relation Extraction Datasets},
  doi          = {10.1007/978-3-031-33455-9_20},
  year         = {2023},
}

@inproceedings{48776,
  author       = {Muschalik, Maximilian and Fumagalli, Fabian and Hammer, Barbara and Hüllermeier, Eyke},
  booktitle    = {Machine Learning and Knowledge Discovery in Databases: Research Track - European Conference (ECML PKDD)},
  isbn         = {9783031434174},
  issn         = {1611-3349},
  publisher    = {Springer Nature Switzerland},
  title        = {{iSAGE}: An Incremental Version of {SAGE} for Online Explanation on Data Streams},
  doi          = {10.1007/978-3-031-43418-1_26},
  year         = {2023},
}

