@inproceedings{63918,
  abstract     = {{Many real-world datasets, such as citation networks, social networks, and molecular structures, are naturally represented as heterogeneous graphs, where nodes belong to different types and have additional features. For example, in a citation network, nodes representing "Paper" or "Author" may include attributes like keywords or affiliations. A critical machine learning task on these graphs is node classification, which is useful for applications such as fake news detection, corporate risk assessment, and molecular property prediction. Although Heterogeneous Graph Neural Networks (HGNNs) perform well in these contexts, their predictions remain opaque. Existing post-hoc explanation methods lack support for actual node features beyond one-hot encoding of node type and often fail to generate realistic, faithful explanations. To address these gaps, we propose DiGNNExplainer, a model-level explanation approach that synthesizes heterogeneous graphs with realistic node features via discrete denoising diffusion. In particular, we generate realistic discrete features (e.g., bag-of-words features) using diffusion models within a discrete space, whereas previous approaches are limited to continuous spaces. We evaluate our approach on multiple datasets and show that DiGNNExplainer produces explanations that are realistic and faithful to the model's decision-making, outperforming state-of-the-art methods.}},
  author       = {{Das, Pallabee and Heindorf, Stefan}},
  booktitle    = {{Proceedings of the ACM Web Conference 2026 (WWW ’26)}},
  location     = {{Dubai, United Arab Emirates}},
  publisher    = {{ACM}},
  title        = {{{Discrete Diffusion-Based Model-Level Explanation of Heterogeneous GNNs with Node Features}}},
  year         = {{2026}},
}

@article{54450,
  abstract     = {{In the last decade, there has been increasing interest in allowing users to understand how the predictions of machine-learned models come about, thus increasing transparency and empowering users to understand and potentially contest those decisions. Dialogue-based approaches, in contrast to traditional one-shot eXplainable Artificial Intelligence (XAI) methods, facilitate interactive, in-depth exploration through multi-turn dialogues, simulating expert conversations. This paper reviews the current state of dialogue-based XAI, presenting a systematic review of 1,339 publications, narrowed down to 14 based on inclusion criteria. We explore theoretical foundations of the systems, propose key dimensions along which different solutions to dialogue-based XAI differ, and identify key use cases, target audiences, system components, and the types of supported queries and responses. Furthermore, we investigate the current paradigms by which systems are evaluated and highlight their key limitations. Key findings include identifying the main use cases, objectives, and audiences targeted by dialogue-based XAI methods, and summarize the main types of questions and information needs. Beyond discussing avenues for future work, we present a meta-architecture for these systems from existing literature and outlined prevalent theoretical frameworks.}},
  author       = {{Mindlin, Dimitry and Beer, Fabian and Sieger, Leonie Nora and Heindorf, Stefan and Cimiano, Philipp and Esposito, Elena and Ngonga Ngomo, Axel-Cyrille}},
  journal      = {{Artificial Intelligence Review}},
  number       = {{3}},
  publisher    = {{Springer}},
  title        = {{{Beyond One-Shot Explanations: A Systematic Literature Review of Dialogue-Based XAI Approaches}}},
  doi          = {{10.1007/s10462-024-11007-7}},
  volume       = {{58}},
  year         = {{2025}},
}

@unpublished{63624,
  booktitle    = {{arXiv:2510.15623}},
  title        = {{{CQD-SHAP: Explainable Complex Query Answering via Shapley Values}}},
  doi          = {{10.48550/arXiv.2510.15623}},
  year         = {{2025}},
}

@inbook{63807,
  author       = {{Dettelbach, Andrea}},
  booktitle    = {{Beiträge zum Mathematikunterricht 2025}},
  editor       = {{Schick, Lisa and Platz, Melanie and Lambert, Anselm}},
  location     = {{Saarbrücken}},
  pages        = {{634--637}},
  publisher    = {{WTM - Verlag für wissenschaftliche Texte und Medien}},
  title        = {{{Rechnen mit Beziehungen - operative Beziehungen erkennen, beschreiben und nutzen. Entwicklung einer digitalbasierten Lernumgebung mit der App Rechenfeld}}},
  doi          = {{10.37626/GA9783959873307.0}},
  year         = {{2025}},
}

@inproceedings{62707,
  author       = {{Heindorf, Stefan and Neib, Daniel}},
  booktitle    = {{Proceedings of the 34th ACM International Conference on Information and Knowledge Management}},
  publisher    = {{ACM}},
  title        = {{{Assessing Natural Language Explanations of Relational Graph Neural Networks}}},
  doi          = {{10.1145/3746252.3760918}},
  year         = {{2025}},
}

@inbook{63507,
  author       = {{Pandit, Gaurav and Röder, Michael and Ngonga Ngomo, Axel-Cyrille}},
  booktitle    = {{Lecture Notes in Computer Science}},
  isbn         = {{9783031945748}},
  issn         = {{0302-9743}},
  publisher    = {{Springer Nature Switzerland}},
  title        = {{{Evaluating Approximate Nearest Neighbour Search Systems on Knowledge Graph Embeddings}}},
  doi          = {{10.1007/978-3-031-94575-5_4}},
  year         = {{2025}},
}

@inproceedings{63572,
  author       = {{Demir, Caglar and Yekini, Moshood Olawale and Röder, Michael and Mahmood, Yasir and Ngonga Ngomo, Axel-Cyrille}},
  booktitle    = {{Lecture Notes in Computer Science}},
  isbn         = {{9783032060655}},
  issn         = {{0302-9743}},
  location     = {{Porto}},
  publisher    = {{Springer Nature Switzerland}},
  title        = {{{Tree-Based OWL Class Expression Learner over Large Graphs}}},
  doi          = {{10.1007/978-3-032-06066-2_29}},
  year         = {{2025}},
}

@inproceedings{63575,
  author       = {{Kapoor, Sourabh and Sharma, Arnab and Röder, Michael and Demir, Caglar and Ngonga Ngomo, Axel-Cyrille}},
  booktitle    = {{Lecture Notes in Computer Science}},
  isbn         = {{9783031945748}},
  issn         = {{0302-9743}},
  publisher    = {{Springer Nature Switzerland}},
  title        = {{{Robustness Evaluation of Knowledge Graph Embedding Models Under Non-targeted Attacks}}},
  doi          = {{10.1007/978-3-031-94575-5_15}},
  year         = {{2025}},
}

@inproceedings{63573,
  author       = {{Memariani, Adel and Röder, Michael and Sharma, Arnab and Demir, Caglar and Ngonga Ngomo, Axel-Cyrille}},
  booktitle    = {{Lecture Notes in Computer Science}},
  isbn         = {{9783032095268}},
  issn         = {{0302-9743}},
  publisher    = {{Springer Nature Switzerland}},
  title        = {{{Link Prediction Under Non-targeted Attacks: Do Soft Labels Always Help?}}},
  doi          = {{10.1007/978-3-032-09527-5_6}},
  year         = {{2025}},
}

@inproceedings{63574,
  author       = {{Zhang, Quannian and Röder, Michael and Srivastava, Nikit and Kouagou, N'Dah Jean and Ngonga Ngomo, Axel-Cyrille}},
  booktitle    = {{Proceedings of the Knowledge Capture Conference 2025}},
  publisher    = {{ACM}},
  title        = {{{Explainable Benchmarking through the Lense of Concept Learning}}},
  doi          = {{10.1145/3731443.3771359}},
  year         = {{2025}},
}

@inproceedings{54449,
  author       = {{Kouagou, N'Dah Jean and Demir, Caglar and Zahera, Hamada Mohamed Abdelsamee and Wilke, Adrian and Heindorf, Stefan and Li, Jiayi and Ngonga Ngomo, Axel-Cyrille}},
  booktitle    = {{Companion Proceedings of the ACM on Web Conference 2024}},
  location     = {{Singapore}},
  publisher    = {{ACM}},
  title        = {{{Universal Knowledge Graph Embeddings}}},
  doi          = {{10.1145/3589335.3651978}},
  year         = {{2024}},
}

@unpublished{54448,
  abstract     = {{Graph Neural Networks (GNNs) are effective for node classification in
graph-structured data, but they lack explainability, especially at the global
level. Current research mainly utilizes subgraphs of the input as local
explanations or generates new graphs as global explanations. However, these
graph-based methods are limited in their ability to explain classes with
multiple sufficient explanations. To provide more expressive explanations, we
propose utilizing class expressions (CEs) from the field of description logic
(DL). Our approach explains heterogeneous graphs with different types of nodes
using CEs in the EL description logic. To identify the best explanation among
multiple candidate explanations, we employ and compare two different scoring
functions: (1) For a given CE, we construct multiple graphs, have the GNN make
a prediction for each graph, and aggregate the predicted scores. (2) We score
the CE in terms of fidelity, i.e., we compare the predictions of the GNN to the
predictions by the CE on a separate validation set. Instead of subgraph-based
explanations, we offer CE-based explanations.}},
  author       = {{Köhler, Dominik and Heindorf, Stefan}},
  booktitle    = {{arXiv:2405.12654}},
  title        = {{{Utilizing Description Logics for Global Explanations of Heterogeneous Graph Neural Networks}}},
  year         = {{2024}},
}

@inproceedings{52231,
  author       = {{Blübaum, Lukas and Heindorf, Stefan}},
  booktitle    = {{The World Wide Web Conference (WWW)}},
  location     = {{Singapore}},
  pages        = {{2204--2215}},
  publisher    = {{ACM}},
  title        = {{{Causal Question Answering with Reinforcement Learning}}},
  doi          = {{10.1145/3589334.3645610}},
  year         = {{2024}},
}

@inproceedings{56213,
  author       = {{Sapkota, Rupesh and Köhler, Dominik and Heindorf, Stefan}},
  booktitle    = {{Proceedings of the 33rd ACM International Conference on Information and Knowledge Management (CIKM ’24)}},
  location     = {{Boise, Idaho, USA}},
  publisher    = {{ACM}},
  title        = {{{EDGE: Evaluation Framework for Logical vs. Subgraph Explanations for Node Classifiers on Knowledge Graphs}}},
  doi          = {{10.1145/3627673.3679904}},
  year         = {{2024}},
}

@inbook{56214,
  author       = {{Li, Jiayi and Satheesh, Sheetal and Heindorf, Stefan and Moussallem, Diego and Speck, René and Ngonga Ngomo, Axel-Cyrille}},
  booktitle    = {{Communications in Computer and Information Science}},
  isbn         = {{9783031637865}},
  issn         = {{1865-0929}},
  location     = {{Valletta, Malta}},
  publisher    = {{Springer Nature Switzerland}},
  title        = {{{AutoCL: AutoML for Concept Learning}}},
  doi          = {{10.1007/978-3-031-63787-2_7}},
  year         = {{2024}},
}

@inbook{47421,
  abstract     = {{Class expression learning in description logics has long been regarded as an iterative search problem in an infinite conceptual space. Each iteration of the search process invokes a reasoner and a heuristic function. The reasoner finds the instances of the current expression, and the heuristic function computes the information gain and decides on the next step to be taken. As the size of the background knowledge base grows, search-based approaches for class expression learning become prohibitively slow. Current neural class expression synthesis (NCES) approaches investigate the use of neural networks for class expression learning in the attributive language with complement (ALC). While they show significant improvements over search-based approaches in runtime and quality of the computed solutions, they rely on the availability of pretrained embeddings for the input knowledge base. Moreover, they are not applicable to ontologies in more expressive description logics. In this paper, we propose a novel NCES approach which extends the state of the art to the description logic ALCHIQ(D). Our extension, dubbed NCES2, comes with an improved training data generator and does not require pretrained embeddings for the input knowledge base as both the embedding model and the class expression synthesizer are trained jointly. Empirical results on benchmark datasets suggest that our approach inherits the scalability capability of current NCES instances with the additional advantage that it supports more complex learning problems. NCES2 achieves the highest performance overall when compared to search-based approaches and to its predecessor NCES. We provide our source code, datasets, and pretrained models at https://github.com/dice-group/NCES2.}},
  author       = {{Kouagou, N'Dah Jean and Heindorf, Stefan and Demir, Caglar and Ngonga Ngomo, Axel-Cyrille}},
  booktitle    = {{Machine Learning and Knowledge Discovery in Databases: Research Track}},
  isbn         = {{9783031434204}},
  issn         = {{0302-9743}},
  location     = {{Turin}},
  publisher    = {{Springer Nature Switzerland}},
  title        = {{{Neural Class Expression Synthesis in ALCHIQ(D)}}},
  doi          = {{10.1007/978-3-031-43421-1_12}},
  year         = {{2023}},
}

@unpublished{37937,
  abstract     = {{Knowledge bases are widely used for information management on the web,
enabling high-impact applications such as web search, question answering, and
natural language processing. They also serve as the backbone for automatic
decision systems, e.g. for medical diagnostics and credit scoring. As
stakeholders affected by these decisions would like to understand their
situation and verify fair decisions, a number of explanation approaches have
been proposed using concepts in description logics. However, the learned
concepts can become long and difficult to fathom for non-experts, even when
verbalized. Moreover, long concepts do not immediately provide a clear path of
action to change one's situation. Counterfactuals answering the question "How
must feature values be changed to obtain a different classification?" have been
proposed as short, human-friendly explanations for tabular data. In this paper,
we transfer the notion of counterfactuals to description logics and propose the
first algorithm for generating counterfactual explanations in the description
logic $\mathcal{ELH}$. Counterfactual candidates are generated from concepts
and the candidates with fewest feature changes are selected as counterfactuals.
In case of multiple counterfactuals, we rank them according to the likeliness
of their feature combinations. For evaluation, we conduct a user survey to
investigate which of the generated counterfactual candidates are preferred for
explanation by participants. In a second study, we explore possible use cases
for counterfactual explanations.}},
  author       = {{Sieger, Leonie Nora and Heindorf, Stefan and Blübaum, Lukas and Ngonga Ngomo, Axel-Cyrille}},
  booktitle    = {{arXiv:2301.05109}},
  title        = {{{Explaining ELH Concept Descriptions through Counterfactual Reasoning}}},
  year         = {{2023}},
}

@inproceedings{50797,
  author       = {{Röder, Michael and Kuchelev, Denis and Ngonga Ngomo, Axel-Cyrille}},
  booktitle    = {{Knowledge Graphs and Semantic Web}},
  editor       = {{Ortiz-Rodriguez, Fernando and Villazón-Terrazas, Boris and Tiwari, Sanju and Bobed, Carlos}},
  isbn         = {{978-3-031-47745-4}},
  keywords     = {{sail dice roeder kuchelev ngonga}},
  pages        = {{183--198}},
  publisher    = {{Springer Nature Switzerland}},
  title        = {{{A Topic Model for the Data Web}}},
  doi          = {{10.1007/978-3-031-47745-4_14}},
  year         = {{2023}},
}

@phdthesis{54607,
  author       = {{Röder, Michael}},
  keywords     = {{dice roeder}},
  publisher    = {{Paderborn University}},
  title        = {{{Automating the Discovery of Linking Candidates}}},
  doi          = {{10.17619/UNIPB/1-1666}},
  year         = {{2023}},
}

@inproceedings{54612,
  author       = {{Kouagou, N'Dah Jean and Heindorf, Stefan and Demir, Caglar and Ngonga Ngomo, Axel-Cyrille}},
  booktitle    = {{NeSy 2023, 17th International Workshop on Neural-Symbolic Learning and Reasoning, Certosa di Pontignano, Siena, Italy}},
  keywords     = {{318 SFB-TRR demir dice enexa heindorf knowgraphs kouagou ngonga sail}},
  publisher    = {{CEUR-WS}},
  title        = {{{Neural Class Expression Synthesis (Extended Abstract)}}},
  year         = {{2023}},
}

