@article{60990,
  abstract     = {{Large Language Models (LLMs) have demonstrated remarkable performance across a wide range of natural language processing tasks. However, their effectiveness in low-resource languages remains underexplored, particularly in complex tasks such as end-to-end Entity Linking (EL), which requires both mention detection and disambiguation against a knowledge base (KB). In earlier work, we introduced IndEL — the first end-to-end EL benchmark dataset for the Indonesian language — covering both a general domain (news) and a specific domain (religious text from the Indonesian translation of the Quran), and evaluated four traditional end-to-end EL systems on this dataset. In this study, we propose ELEVATE-ID, a comprehensive evaluation framework for assessing LLM performance on end-to-end EL in Indonesian. The framework evaluates LLMs under both zero-shot and fine-tuned conditions, using multilingual and Indonesian monolingual models, with Wikidata as the target KB. Our experiments include performance benchmarking, generalization analysis across domains, and systematic error analysis. Results show that GPT-4 and GPT-3.5 achieve the highest accuracy in zero-shot and fine-tuned settings, respectively. However, even fine-tuned GPT-3.5 underperforms compared to DBpedia Spotlight — the weakest of the traditional model baselines — in the general domain. Interestingly, GPT-3.5 outperforms Babelfy in the specific domain. Generalization analysis indicates that fine-tuned GPT-3.5 adapts more effectively to cross-domain and mixed-domain scenarios. Error analysis uncovers persistent challenges that hinder LLM performance: difficulties with non-complete mentions, acronym disambiguation, and full-name recognition in formal contexts. These issues point to limitations in mention boundary detection and contextual grounding. 
Indonesian-pretrained LLMs, Komodo and Merak, reveal core weaknesses: template leakage and entity hallucination, respectively—underscoring architectural and training limitations in low-resource end-to-end EL. Code and dataset are available at https://github.com/dice-group/ELEVATE-ID.}},
  author       = {{Gusmita, Ria Hari and Firmansyah, Asep Fajar and Zahera, Hamada Mohamed Abdelsamee and Ngonga Ngomo, Axel-Cyrille}},
  issn         = {{0169-023X}},
  journal      = {{Data \& Knowledge Engineering}},
  keywords     = {{LLMs, Evaluation, End-to-end EL, Indonesian}},
  pages        = {{102504}},
  title        = {{{ELEVATE-ID: Extending Large Language Models for End-to-End Entity Linking Evaluation in Indonesian}}},
  doi          = {{10.1016/j.datak.2025.102504}},
  volume       = {{161}},
  year         = {{2026}},
}

@unpublished{62723,
  abstract     = {{Structural measures of graphs, such as treewidth, are central tools in computational complexity resulting in efficient algorithms when exploiting the parameter. It is even known that modern SAT solvers work efficiently on instances of small treewidth. Since these solvers are widely applied, research interests in compact encodings into (Q)SAT for solving and to understand encoding limitations. Even more general is the graph parameter clique-width, which unlike treewidth can be small for dense graphs. Although algorithms are available for clique-width, little is known about encodings. We initiate the quest to understand encoding capabilities with clique-width by considering abstract argumentation, which is a robust framework for reasoning with conflicting arguments. It is based on directed graphs and asks for computationally challenging properties, making it a natural candidate to study computational properties. We design novel reductions from argumentation problems to (Q)SAT. Our reductions linearly preserve the clique-width, resulting in directed decomposition-guided (DDG) reductions. We establish novel results for all argumentation semantics, including counting. Notably, the overhead caused by our DDG reductions cannot be significantly improved under reasonable assumptions.}},
  author       = {{Mahmood, Yasir and Hecher, Markus and Groven, Johanna and Fichte, Johannes K.}},
  note         = {{Pre-print of paper accepted at AAAI 2026}},
  title        = {{{Structure-Aware Encodings of Argumentation Properties for Clique-width}}},
  year         = {{2026}},
}

@unpublished{62721,
  abstract     = {{We introduce the notion of contrastive ABox explanations to answer questions of the type "Why is a an instance of C, but b is not?". While there are various approaches for explaining positive entailments (why is C(a) entailed by the knowledge base) as well as missing entailments (why is C(b) not entailed) in isolation, contrastive explanations consider both at the same time, which allows them to focus on the relevant commonalities and differences between a and b. We develop an appropriate notion of contrastive explanations for the special case of ABox reasoning with description logic ontologies, and analyze the computational complexity for different variants under different optimality criteria, considering lightweight as well as more expressive description logics. We implemented a first method for computing one variant of contrastive explanations, and evaluated it on generated problems for realistic knowledge bases.}},
  author       = {{Koopmann, Patrick and Mahmood, Yasir and Ngonga Ngomo, Axel-Cyrille and Tiwari, Balram}},
  note         = {{Pre-print of paper accepted at AAAI 2026}},
  title        = {{{Can You Tell the Difference? Contrastive Explanations for ABox Entailments}}},
  year         = {{2026}},
}

@article{54450,
  abstract     = {{In the last decade, there has been increasing interest in allowing users to understand how the predictions of machine-learned models come about, thus increasing transparency and empowering users to understand and potentially contest those decisions.Dialogue-based approaches, in contrast to traditional one-shot eXplainable Artificial Intelligence (XAI) methods, facilitate interactive, in-depth exploration through multi-turn dialogues, simulating expert conversations. This paper reviews the current state of dialogue-based XAI, presenting a systematic review of 1,339 publications, narrowed down to 14 based on inclusion criteria. We explore theoretical foundations of the systems, propose key dimensions along which different solutions to dialogue-based XAI differ, and identify key use cases, target audiences, system components, and the types of supported queries and responses. Furthermore, we investigate the current paradigms by which systems are evaluated and highlight their key limitations. Key findings include identifying the main use cases, objectives, and audiences targeted by dialogue-based XAI methods, and summarize the main types of questions and information needs. Beyond discussing avenues for future work, we present a meta-architecture for these systems from existing literature and outlined prevalent theoretical frameworks.}},
  author       = {{Mindlin, Dimitry and Beer, Fabian and Sieger, Leonie Nora and Heindorf, Stefan and Cimiano, Philipp and Esposito, Elena and Ngonga Ngomo, Axel-Cyrille}},
  journal      = {{Artificial Intelligence Review}},
  number       = {{3}},
  publisher    = {{Springer}},
  title        = {{{Beyond One-Shot Explanations: A Systematic Literature Review of Dialogue-Based XAI Approaches}}},
  doi          = {{10.1007/s10462-024-11007-7}},
  volume       = {{58}},
  year         = {{2025}},
}

@unpublished{61065,
  abstract     = {{Abduction is the task of computing a sufficient extension of a knowledge base (KB) that entails a conclusion not entailed by the original KB. It serves to compute explanations, or hypotheses, for such missing entailments. While this task has been intensively investigated for perfect data and under classical semantics, less is known about abduction when erroneous data results in inconsistent KBs. In this paper we define a suitable notion of abduction under repair semantics and propose a set of minimality criteria that guides abduction towards `useful' hypotheses. We provide initial complexity results on deciding existence of and verifying abductive solutions with these criteria, under different repair semantics and for the description logics DL-Lite and EL_bot.}},
  author       = {{Haak, Anselm and Koopmann, Patrick and Mahmood, Yasir and Turhan, Anni-Yasmin}},
  note         = {{arXiv:2507.21955}},
  title        = {{{Why not? Developing ABox Abduction beyond Repairs}}},
  year         = {{2025}},
}

@inproceedings{63888,
  author       = {{Haak, Anselm and Koopmann, Patrick and Mahmood, Yasir and Turhan, Anni-Yasmin}},
  booktitle    = {{Proceedings of the 38th International Workshop on Description Logics - DL 2025}},
  editor       = {{Tendera, Lidia and Ibanez Garcia, Yazmin and Koopmann, Patrick}},
  location     = {{Opole, Poland}},
  title        = {{{Why not? Developing ABox Abduction beyond Repairs}}},
  year         = {{2025}},
}

@inproceedings{59910,
  abstract     = {{The connection between inconsistent databases and Dung’s abstract argumentation framework has recently drawn growing interest. Specifically, an inconsistent database, involving certain types of integrity constraints such as functional and inclusion dependencies, can be viewed as an argumentation framework in Dung’s setting. Nevertheless, no prior work has explored the exact expressive power of Dung’s theory of argumentation when compared to inconsistent databases and integrity constraints. In this paper, we close this gap by arguing that an argumentation framework can also be viewed as an inconsistent database. We first establish a connection between subset-repairs for databases and extensions for AFs considering conflict-free, naive, admissible, and preferred semantics. Further, we define a new family of attribute-based repairs based on the principle of maximal content preservation. The effectiveness of these repairs is then highlighted by connecting them to stable, semi-stable, and stage semantics. Our main contributions include translating an argumentation framework into a database together with integrity constraints. Moreover, this translation can be achieved in polynomial time, which is essential in transferring complexity results between the two formalisms.}},
  author       = {{Mahmood, Yasir and Hecher, Markus and Ngonga Ngomo, Axel-Cyrille}},
  booktitle    = {{Proceedings of the AAAI Conference on Artificial Intelligence}},
  issn         = {{2374-3468}},
  number       = {{14}},
  pages        = {{15058--15066}},
  publisher    = {{Association for the Advancement of Artificial Intelligence (AAAI)}},
  title        = {{{Dung’s Argumentation Framework: Unveiling the Expressive Power with Inconsistent Databases}}},
  doi          = {{10.1609/aaai.v39i14.33651}},
  volume       = {{39}},
  year         = {{2025}},
}

@article{61123,
  abstract     = {{Knowledge graphs are used by a growing number of applications to represent structured data. Hence, evaluating the veracity of assertions in knowledge graphs—dubbed fact checking—is currently a challenge of growing importance. However, manual fact checking is commonly impractical due to the sheer size of knowledge graphs. This paper is a systematic survey of recent works on automatic fact checking with a focus on knowledge graphs. We present recent fact-checking approaches, the varied sources they use as background knowledge, and the features they rely upon. Finally, we draw conclusions pertaining to possible future research directions in fact checking knowledge graphs.}},
  author       = {{Qudus, Umair and Röder, Michael and Saleem, Muhammad and Ngonga Ngomo, Axel-Cyrille}},
  issn         = {{0360-0300}},
  journal      = {{ACM Computing Surveys}},
  keywords     = {{fact checking, knowledge graphs, fact-checkers, check worthiness, evidence retrieval, trust, veracity.}},
  publisher    = {{Association for Computing Machinery (ACM)}},
  title        = {{{Fact Checking Knowledge Graphs -- A Survey}}},
  doi          = {{10.1145/3749838}},
  volume       = {{58}},
  year         = {{2025}},
}

@article{59912,
  abstract     = {{We study the expressivity and the complexity of various logics in probabilistic team semantics with the Boolean negation. In particular, we study the extension of probabilistic independence logic with the Boolean negation, and a recently introduced logic first-order theory of random variables with probabilistic independence. We give several results that compare the expressivity of these logics with the most studied logics in probabilistic team semantics setting, as well as relating their expressivity to a numerical variant of second-order logic. In addition, we introduce novel entropy atoms and show that the extension of first-order logic by entropy atoms subsumes probabilistic independence logic. Finally, we obtain some results on the complexity of model checking, validity and satisfiability of our logics.}},
  author       = {{Hannula, Miika and Hirvonen, Minna and Kontinen, Juha and Mahmood, Yasir and Meier, Arne and Virtema, Jonni}},
  issn         = {{0955-792X}},
  journal      = {{Journal of Logic and Computation}},
  number       = {{3}},
  publisher    = {{Oxford University Press (OUP)}},
  title        = {{{Logics with probabilistic team semantics and the Boolean negation}}},
  doi          = {{10.1093/logcom/exaf021}},
  volume       = {{35}},
  year         = {{2025}},
}

@inproceedings{59054,
  author       = {{Firmansyah, Asep Fajar and Zahera, Hamada Mohamed Abdelsamee and Sherif, Mohamed and Moussallem, Diego and Ngonga Ngomo, Axel-Cyrille}},
  booktitle    = {{ESWC2025}},
  isbn         = {{978-3-031-94575-5}},
  keywords     = {{firmansyah mousallem ngonga sherif zahera}},
  pages        = {{133--151}},
  publisher    = {{Springer Nature Switzerland}},
  title        = {{{ANTS: Abstractive Entity Summarization in Knowledge Graphs}}},
  doi          = {{10.1007/978-3-031-94575-5_8}},
  year         = {{2025}},
}

@unpublished{61066,
  abstract     = {{Argumentation is a central subarea of Artificial Intelligence (AI) for
modeling and reasoning about arguments. The semantics of abstract argumentation
frameworks (AFs) is given by sets of arguments (extensions) and conditions on
the relationship between them, such as stable or admissible. Today's solvers
implement tasks such as finding extensions, deciding credulous or skeptical
acceptance, counting, or enumerating extensions. While these tasks are well
charted, the area between decision, counting/enumeration and fine-grained
reasoning requires expensive reasoning so far. We introduce a novel concept
(facets) for reasoning between decision and enumeration. Facets are arguments
that belong to some extensions (credulous) but not to all extensions
(skeptical). They are most natural when a user aims to navigate, filter, or
comprehend the significance of specific arguments, according to their needs. We
study the complexity and show that tasks involving facets are much easier than
counting extensions. Finally, we provide an implementation, and conduct
experiments to demonstrate feasibility.}},
  author       = {{Fichte, Johannes and Fröhlich, Nicolas and Hecher, Markus and Lagerkvist, Victor and Mahmood, Yasir and Meier, Arne and Persson, Jonathan}},
  note         = {{arXiv:2505.10982}},
  title        = {{{Facets in Argumentation: A Formal Approach to Argument Significance}}},
  year         = {{2025}},
}

@inproceedings{61202,
  abstract     = {{The number of datasets on the web of data increases continuously. However, the knowledge contained therein cannot be fully utilized without finding links between the entities contained in these datasets. Equivalent entities can not be identified solely by checking the equivalence of IRIs because of the different origins and naming schemes of different data providers. Yet, such equivalences can be discovered by computing the similarity of their attributes. In this paper we propose GLIDE, an approach that links entities from two different datasets by embedding a joint model of these datasets enriched by additional relations describing the similarity of literals. The joint model is embedded into a latent vector space while paying attention to juxtaposing similar literals. We evaluate our approach against state-of-the-art algorithms using real-world datasets commonly used in link discovery literature. The results show that GLIDE outperforms all baselines on 5 of 7 datasets with perfect or near-perfect accuracy. Our approach achieves its best performance on datasets that feature several literals with similarities. Our experiments indicate that researchers should not only pay attention to equal literals in knowledge graph embedding but should also be aware of the distance between similar literals.}},
  author       = {{Becker, Alexander and Ngonga Ngomo, Axel-Cyrille and Sherif, Mohamed}},
  booktitle    = {{The Semantic Web – ISWC 2025}},
  keywords     = {{becker sherif enexa sailproject dice simba ngonga whale}},
  title        = {{{GLIDE: Knowledge Graph Linking using Distance-Aware Embeddings}}},
  year         = {{2025}},
}

@article{61134,
  author       = {{Manzoor, Ali and Speck, René and Zahera, Hamada Mohamed Abdelsamee and Saleem, Muhammad and Moussallem, Diego and Ngonga Ngomo, Axel-Cyrille}},
  issn         = {{2169-3536}},
  journal      = {{IEEE Access}},
  pages        = {{1--1}},
  publisher    = {{Institute of Electrical and Electronics Engineers (IEEE)}},
  title        = {{{Multilingual Relation Extraction - A Survey}}},
  doi          = {{10.1109/access.2025.3604258}},
  year         = {{2025}},
}

@article{61445,
  abstract     = {{ABSTRACT In recent years, there has been an increasing awareness of the importance of incorporating diversity into research projects, focusing on both how they are conducted and their content. Funding organizations have started to require that research applicants pay attention to inclusion and diversity by considering gender dimensions and other diversity factors in their project plans and ensuring gender equality during execution. Based on an extensive literature research and expert discussions on how to develop and implement diversity strategies in large collaborative research projects, we argue that there is a lack of practical advice in existing literature. Drawing from our own experiences in conceptualizing and implementing a Diversity Program across four universities in Germany, we propose a framework for effectively integrating diversity into collaborative research initiatives across various academic fields.}},
  author       = {{Lorke, Mariya and Amelung, Rena and Kuchling, Peter and Paaßen, Benjamin and Pein-Hackelbusch, Miriam and Schloots, Franziska Margarete and Schulz, Klara and Nauerth, Annette}},
  journal      = {{Diversity \& Inclusion Research}},
  keywords     = {{collaborative research projects, diversity strategy, gender equality}},
  number       = {{4}},
  pages        = {{e70040}},
  title        = {{{Development and Implementation of Diversity Programs in Large Collaborative Research Projects: An Example From Germany}}},
  doi          = {{10.1002/dvr2.70040}},
  volume       = {{2}},
  year         = {{2025}},
}

@inbook{61537,
  author       = {{Schloots, Franziska Margarete}},
  booktitle    = {{Virtuelles Essen- Interdisziplinäre Perspektiven auf Ernährungspraktiken im digitalen Zeitalter}},
  editor       = {{Jürgens, Jane Lia and Lewandowski, Kira and Aßmann, Sandra}},
  isbn         = {{978-3-8394-7633-8}},
  keywords     = {{Selbstvermessung, Wearables, Diet Tracking, Quantifizierung}},
  pages        = {{225--251}},
  publisher    = {{transcript Verlag}},
  title        = {{{Essen nach Zahlen - Die Quantifizierung von Ernährung im Alltag}}},
  doi          = {{10.14361/9783839476338}},
  year         = {{2025}},
}

@inproceedings{62119,
  author       = {{Ihtassine, Reda and Firmansyah, Asep Fajar and Srivastava, Nikit and Ali, Manzoor and Ngonga Ngomo, Axel-Cyrille and Sherif, Mohamed}},
  booktitle    = {{Proceedings of the 12th Knowledge Capture Conference 2025, {K-CAP} 2025, The Thirteenth International Conference on Knowledge Capture, December 10 - 12, 2025, Dayton, Ohio, USA}},
  keywords     = {{Srivastava ali dice enexa firmansyah ihtassine ngonga sailproject sherif whale}},
  publisher    = {{ACM}},
  title        = {{{NL2LS: LLM-based Automatic Linking of Knowledge Graphs}}},
  year         = {{2025}},
}

@inbook{62701,
  abstract     = {{Learning continuous vector representations for knowledge graphs has significantly improved state-of-the-art performances in many challenging tasks. Yet, deep-learning-based models are only post-hoc and locally explainable. In contrast, learning Web Ontology Language (OWL) class expressions in Description Logics (DLs) is ante-hoc and globally explainable. However, state-of-the-art learners have two well-known limitations: scaling to large knowledge graphs and handling missing information. Here, we present a decision-tree-based learner (tDL) to learn Web Ontology Language (OWL) class expressions over large knowledge graphs, while imputing missing triples. Given positive and negative example individuals, tDL firstly constructs unique OWL expressions in SHOIN from concise bounded descriptions of individuals. Each OWL class expression is used as a feature in a binary classification problem to represent input individuals. Thereafter, tDL fits a CART decision tree to learn Boolean decision rules distinguishing positive examples from negative examples. A final OWL expression in SHOIN is built by traversing the built CART decision tree from the root node to leaf nodes for each positive example. By this, tDL can learn OWL class expressions without exploration, i.e., the number of queries to a knowledge graph is bounded by the number of input individuals. Our empirical results show that tDL outperforms the current state-of-the-art models across datasets. Importantly, our experiments over a large knowledge graph (DBpedia with 1.1 billion triples) show that tDL can effectively learn accurate OWL class expressions, while the state-of-the-art models fail to return any results. Finally, expressions learned by tDL can be seamlessly translated into natural language explanations using a pre-trained large language model and a DL verbalizer.}},
  author       = {{Demir, Caglar and Yekini, Moshood and Röder, Michael and Mahmood, Yasir and Ngonga Ngomo, Axel-Cyrille}},
  booktitle    = {{Lecture Notes in Computer Science}},
  isbn         = {{9783032060655}},
  issn         = {{0302-9743}},
  keywords     = {{Decision Tree, OWL Class Expression Learning, Description Logic, Knowledge Graph, Large Language Model, Verbalizer}},
  location     = {{Porto, Portugal}},
  publisher    = {{Springer Nature Switzerland}},
  title        = {{{Tree-Based OWL Class Expression Learner over Large Graphs}}},
  doi          = {{10.1007/978-3-032-06066-2_29}},
  year         = {{2025}},
}

@inproceedings{61041,
  abstract     = {{Large Language Models (LLMs) are increasingly deployed in real-world applications that require access to up-to-date knowledge. However, retraining LLMs is computationally expensive. Therefore, knowledge editing techniques are crucial for maintaining current information and correcting erroneous assertions within pre-trained models. Current benchmarks for knowledge editing primarily focus on recalling edited facts, often neglecting their logical consequences. To address this limitation, we introduce a new benchmark designed to evaluate how knowledge editing methods handle the logical consequences of a single fact edit. Our benchmark extracts relevant logical rules from a knowledge graph for a given edit. Then, it generates multi-hop questions based on these rules to assess the impact on logical consequences. Our findings indicate that while existing knowledge editing approaches can accurately insert direct assertions into LLMs, they frequently fail to inject entailed knowledge. Specifically, experiments with popular methods like ROME and FT reveal a substantial performance gap, up to 24%, between evaluations on directly edited knowledge and on entailed knowledge. This highlights the critical need for semantics-aware evaluation frameworks in knowledge editing.}},
  author       = {{Moteu Ngoli, Tatiana and Kouagou, N'Dah Jean and Zahera, Hamada Mohamed Abdelsamee and Ngonga Ngomo, Axel-Cyrille}},
  booktitle    = {{Proceedings of the 24th International Semantic Web Conference (ISWC 2025)}},
  isbn         = {{978-3-032-09530-5}},
  keywords     = {{dice sailproject moteu kouagou zahera ngonga}},
  location     = {{Nara, Japan}},
  pages        = {{41--56}},
  publisher    = {{Springer, Cham}},
  title        = {{{Benchmarking Knowledge Editing using Logical Rules}}},
  doi          = {{10.1007/978-3-032-09530-5_3}},
  year         = {{2025}},
}

@inproceedings{62007,
  abstract     = {{Ensemble methods are widely employed to improve generalization in machine learning. This has also prompted the adoption of ensemble learning for the knowledge graph embedding (KGE) models in performing link prediction. Typical approaches to this end train multiple models as part of the ensemble, and the diverse predictions are then averaged. However, this approach has some significant drawbacks. For instance, the computational overhead of training multiple models increases latency and memory overhead. In contrast, model merging approaches offer a promising alternative that does not require training multiple models. In this work, we introduce model merging, specifically weighted averaging, in
KGE models. Herein, a running average of model parameters from a training epoch onward is maintained and used for predictions. To address this, we additionally propose an approach that selectively updates the running average of the ensemble model parameters only when the generalization performance improves on a validation dataset. We evaluate these two different weighted averaging approaches on link prediction tasks, comparing the state-of-the-art benchmark ensemble approach. Additionally, we evaluate the weighted averaging approach considering literal-augmented KGE models and multi-hop query answering tasks as well. The results demonstrate that the proposed weighted averaging approach consistently improves performance across diverse evaluation settings.}},
  author       = {{Sapkota, Rupesh and Demir, Caglar and Sharma, Arnab and Ngonga Ngomo, Axel-Cyrille}},
  booktitle    = {{Proceedings of the Thirteenth International Conference on Knowledge Capture(K-CAP 2025)}},
  keywords     = {{Knowledge Graphs, Embeddings, Ensemble Learning}},
  location     = {{Dayton, OH, USA}},
  publisher    = {{ACM}},
  title        = {{{Parameter Averaging in Link Prediction}}},
  doi          = {{10.1145/3731443.3771365}},
  year         = {{2025}},
}

@inbook{63507,
  author       = {{Pandit, Gaurav and Röder, Michael and Ngonga Ngomo, Axel-Cyrille}},
  booktitle    = {{Lecture Notes in Computer Science}},
  isbn         = {{9783031945748}},
  issn         = {{0302-9743}},
  publisher    = {{Springer Nature Switzerland}},
  title        = {{{Evaluating Approximate Nearest Neighbour Search Systems on Knowledge Graph Embeddings}}},
  doi          = {{10.1007/978-3-031-94575-5_4}},
  year         = {{2025}},
}

