@inproceedings{63918,
  abstract     = {{Many real-world datasets, such as citation networks, social networks, and molecular structures, are naturally represented as heterogeneous graphs, where nodes belong to different types and have additional features. For example, in a citation network, nodes representing "Paper" or "Author" may include attributes like keywords or affiliations. A critical machine learning task on these graphs is node classification, which is useful for applications such as fake news detection, corporate risk assessment, and molecular property prediction. Although Heterogeneous Graph Neural Networks (HGNNs) perform well in these contexts, their predictions remain opaque. Existing post-hoc explanation methods lack support for actual node features beyond one-hot encoding of node type and often fail to generate realistic, faithful explanations. To address these gaps, we propose DiGNNExplainer, a model-level explanation approach that synthesizes heterogeneous graphs with realistic node features via discrete denoising diffusion. In particular, we generate realistic discrete features (e.g., bag-of-words features) using diffusion models within a discrete space, whereas previous approaches are limited to continuous spaces. We evaluate our approach on multiple datasets and show that DiGNNExplainer produces explanations that are realistic and faithful to the model's decision-making, outperforming state-of-the-art methods.}},
  author       = {{Das, Pallabee and Heindorf, Stefan}},
  booktitle    = {{Proceedings of the ACM Web Conference 2026 (WWW ’26)}},
  location     = {{Dubai, United Arab Emirates}},
  publisher    = {{ACM}},
  title        = {{{Discrete Diffusion-Based Model-Level Explanation of Heterogeneous GNNs with Node Features}}},
  year         = {{2026}},
}

@article{54450,
  abstract     = {{In the last decade, there has been increasing interest in allowing users to understand how the predictions of machine-learned models come about, thus increasing transparency and empowering users to understand and potentially contest those decisions. Dialogue-based approaches, in contrast to traditional one-shot eXplainable Artificial Intelligence (XAI) methods, facilitate interactive, in-depth exploration through multi-turn dialogues, simulating expert conversations. This paper reviews the current state of dialogue-based XAI, presenting a systematic review of 1,339 publications, narrowed down to 14 based on inclusion criteria. We explore theoretical foundations of the systems, propose key dimensions along which different solutions to dialogue-based XAI differ, and identify key use cases, target audiences, system components, and the types of supported queries and responses. Furthermore, we investigate the current paradigms by which systems are evaluated and highlight their key limitations. Key findings include identifying the main use cases, objectives, and audiences targeted by dialogue-based XAI methods, and summarize the main types of questions and information needs. Beyond discussing avenues for future work, we present a meta-architecture for these systems from existing literature and outlined prevalent theoretical frameworks.}},
  author       = {{Mindlin, Dimitry and Beer, Fabian and Sieger, Leonie Nora and Heindorf, Stefan and Cimiano, Philipp and Esposito, Elena and Ngonga Ngomo, Axel-Cyrille}},
  journal      = {{Artificial Intelligence Review}},
  number       = {{3}},
  publisher    = {{Springer}},
  title        = {{{Beyond One-Shot Explanations: A Systematic Literature Review of Dialogue-Based XAI Approaches}}},
  doi          = {{10.1007/s10462-024-11007-7}},
  volume       = {{58}},
  year         = {{2025}},
}

@article{63624,
  title        = {{{CQD-SHAP: Explainable Complex Query Answering via Shapley Values}}},
  doi          = {{10.48550/arXiv.2510.15623}},
  year         = {{2025}},
}

@inproceedings{62707,
  author       = {{Heindorf, Stefan and Neib, Daniel}},
  booktitle    = {{Proceedings of the 34th ACM International Conference on Information and Knowledge Management}},
  publisher    = {{ACM}},
  title        = {{{Assessing Natural Language Explanations of Relational Graph Neural Networks}}},
  doi          = {{10.1145/3746252.3760918}},
  year         = {{2025}},
}

@inproceedings{54449,
  author       = {{Kouagou, N'Dah Jean and Demir, Caglar and Zahera, Hamada Mohamed Abdelsamee and Wilke, Adrian and Heindorf, Stefan and Li, Jiayi and Ngonga Ngomo, Axel-Cyrille}},
  booktitle    = {{Companion Proceedings of the ACM on Web Conference 2024}},
  location     = {{Singapore}},
  publisher    = {{ACM}},
  title        = {{{Universal Knowledge Graph Embeddings}}},
  doi          = {{10.1145/3589335.3651978}},
  year         = {{2024}},
}

@unpublished{54448,
  abstract     = {{Graph Neural Networks (GNNs) are effective for node classification in
graph-structured data, but they lack explainability, especially at the global
level. Current research mainly utilizes subgraphs of the input as local
explanations or generates new graphs as global explanations. However, these
graph-based methods are limited in their ability to explain classes with
multiple sufficient explanations. To provide more expressive explanations, we
propose utilizing class expressions (CEs) from the field of description logic
(DL). Our approach explains heterogeneous graphs with different types of nodes
using CEs in the EL description logic. To identify the best explanation among
multiple candidate explanations, we employ and compare two different scoring
functions: (1) For a given CE, we construct multiple graphs, have the GNN make
a prediction for each graph, and aggregate the predicted scores. (2) We score
the CE in terms of fidelity, i.e., we compare the predictions of the GNN to the
predictions by the CE on a separate validation set. Instead of subgraph-based
explanations, we offer CE-based explanations.}},
  author       = {{Köhler, Dominik and Heindorf, Stefan}},
  note         = {{arXiv:2405.12654}},
  title        = {{{Utilizing Description Logics for Global Explanations of Heterogeneous Graph Neural Networks}}},
  year         = {{2024}},
}

@inproceedings{52231,
  author       = {{Blübaum, Lukas and Heindorf, Stefan}},
  booktitle    = {{The World Wide Web Conference (WWW)}},
  location     = {{Singapore}},
  pages        = {{2204–2215}},
  publisher    = {{ACM}},
  title        = {{{Causal Question Answering with Reinforcement Learning}}},
  doi          = {{10.1145/3589334.3645610}},
  year         = {{2024}},
}

@inproceedings{56213,
  author       = {{Sapkota, Rupesh and Köhler, Dominik and Heindorf, Stefan}},
  booktitle    = {{Proceedings of the 33rd ACM International Conference on Information and Knowledge Management (CIKM ’24)}},
  location     = {{Boise, Idaho, USA}},
  publisher    = {{ACM}},
  title        = {{{EDGE: Evaluation Framework for Logical vs. Subgraph Explanations for Node Classifiers on Knowledge Graphs}}},
  doi          = {{10.1145/3627673.3679904}},
  year         = {{2024}},
}

@inbook{56214,
  author       = {{Li, Jiayi and Satheesh, Sheetal and Heindorf, Stefan and Moussallem, Diego and Speck, René and Ngonga Ngomo, Axel-Cyrille}},
  booktitle    = {{Communications in Computer and Information Science}},
  isbn         = {{9783031637865}},
  issn         = {{1865-0929}},
  location     = {{Valletta, Malta}},
  publisher    = {{Springer Nature Switzerland}},
  title        = {{{AutoCL: AutoML for Concept Learning}}},
  doi          = {{10.1007/978-3-031-63787-2_7}},
  year         = {{2024}},
}

@inbook{46460,
  author       = {{Ngonga Ngomo, Axel-Cyrille and Demir, Caglar and Kouagou, N'Dah Jean and Heindorf, Stefan and Karalis, Nikolaos and Bigerl, Alexander}},
  booktitle    = {{Compendium of Neurosymbolic Artificial Intelligence}},
  pages        = {{272–286}},
  publisher    = {{IOS Press}},
  title        = {{{Class Expression Learning with Multiple Representations}}},
  year         = {{2023}},
}

@inproceedings{46248,
  author       = {{Demir, Caglar and Wiebesiek, Michel and Lu, Renzhong and Ngonga Ngomo, Axel-Cyrille and Heindorf, Stefan}},
  booktitle    = {{ECML PKDD}},
  location     = {{Turin}},
  title        = {{{LitCQD: Multi-Hop Reasoning in Incomplete Knowledge Graphs with Numeric Literals}}},
  year         = {{2023}},
}

@inbook{47421,
  abstract     = {{Class expression learning in description logics has long been regarded as an iterative search problem in an infinite conceptual space. Each iteration of the search process invokes a reasoner and a heuristic function. The reasoner finds the instances of the current expression, and the heuristic function computes the information gain and decides on the next step to be taken. As the size of the background knowledge base grows, search-based approaches for class expression learning become prohibitively slow. Current neural class expression synthesis (NCES) approaches investigate the use of neural networks for class expression learning in the attributive language with complement (ALC). While they show significant improvements over search-based approaches in runtime and quality of the computed solutions, they rely on the availability of pretrained embeddings for the input knowledge base. Moreover, they are not applicable to ontologies in more expressive description logics. In this paper, we propose a novel NCES approach which extends the state of the art to the description logic ALCHIQ(D). Our extension, dubbed NCES2, comes with an improved training data generator and does not require pretrained embeddings for the input knowledge base as both the embedding model and the class expression synthesizer are trained jointly. Empirical results on benchmark datasets suggest that our approach inherits the scalability capability of current NCES instances with the additional advantage that it supports more complex learning problems. NCES2 achieves the highest performance overall when compared to search-based approaches and to its predecessor NCES. We provide our source code, datasets, and pretrained models at https://github.com/dice-group/NCES2.}},
  author       = {{Kouagou, N'Dah Jean and Heindorf, Stefan and Demir, Caglar and Ngonga Ngomo, Axel-Cyrille}},
  booktitle    = {{Machine Learning and Knowledge Discovery in Databases: Research Track}},
  isbn         = {{9783031434204}},
  issn         = {{0302-9743}},
  location     = {{Turin}},
  publisher    = {{Springer Nature Switzerland}},
  title        = {{{Neural Class Expression Synthesis in ALCHIQ(D)}}},
  doi          = {{10.1007/978-3-031-43421-1_12}},
  year         = {{2023}},
}

@unpublished{37937,
  abstract     = {{Knowledge bases are widely used for information management on the web,
enabling high-impact applications such as web search, question answering, and
natural language processing. They also serve as the backbone for automatic
decision systems, e.g. for medical diagnostics and credit scoring. As
stakeholders affected by these decisions would like to understand their
situation and verify fair decisions, a number of explanation approaches have
been proposed using concepts in description logics. However, the learned
concepts can become long and difficult to fathom for non-experts, even when
verbalized. Moreover, long concepts do not immediately provide a clear path of
action to change one's situation. Counterfactuals answering the question "How
must feature values be changed to obtain a different classification?" have been
proposed as short, human-friendly explanations for tabular data. In this paper,
we transfer the notion of counterfactuals to description logics and propose the
first algorithm for generating counterfactual explanations in the description
logic $\mathcal{ELH}$. Counterfactual candidates are generated from concepts
and the candidates with fewest feature changes are selected as counterfactuals.
In case of multiple counterfactuals, we rank them according to the likeliness
of their feature combinations. For evaluation, we conduct a user survey to
investigate which of the generated counterfactual candidates are preferred for
explanation by participants. In a second study, we explore possible use cases
for counterfactual explanations.}},
  author       = {{Sieger, Leonie Nora and Heindorf, Stefan and Blübaum, Lukas and Ngonga Ngomo, Axel-Cyrille}},
  note         = {{arXiv:2301.05109}},
  title        = {{{Explaining ELH Concept Descriptions through Counterfactual Reasoning}}},
  year         = {{2023}},
}

@inproceedings{54612,
  author       = {{Kouagou, N'Dah Jean and Heindorf, Stefan and Demir, Caglar and Ngonga Ngomo, Axel-Cyrille}},
  booktitle    = {{NeSy 2023, 17th International Workshop on Neural-Symbolic Learning and Reasoning, Certosa di Pontignano, Siena, Italy}},
  keywords     = {{318 SFB-TRR demir dice enexa heindorf knowgraphs kouagou ngonga sail}},
  publisher    = {{CEUR-WS}},
  title        = {{{Neural Class Expression Synthesis (Extended Abstract)}}},
  year         = {{2023}},
}

@inproceedings{33734,
  abstract     = {{Many applications require explainable node classification in knowledge graphs. Towards this end, a popular ``white-box'' approach is class expression learning: Given sets of positive and negative nodes, class expressions in description logics are learned that separate positive from negative nodes. Most existing approaches are search-based approaches generating many candidate class expressions and selecting the best one. However, they often take a long time to find suitable class expressions. In this paper, we cast class expression learning as a translation problem and propose a new family of class expression learning approaches which we dub neural class expression synthesizers. Training examples are ``translated'' into class expressions in a fashion akin to machine translation. Consequently, our synthesizers are not subject to the runtime limitations of search-based approaches. We study three instances of this novel family of approaches based on LSTMs, GRUs, and set transformers, respectively. An evaluation of our approach on four benchmark datasets suggests that it can effectively synthesize high-quality class expressions with respect to the input examples in approximately one second on average. Moreover, a comparison to state-of-the-art approaches suggests that we achieve better F-measures on large datasets. For reproducibility purposes, we provide our implementation as well as pretrained models in our public GitHub repository at https://github.com/dice-group/NeuralClassExpressionSynthesis}},
  author       = {{Kouagou, N'Dah Jean and Heindorf, Stefan and Demir, Caglar and Ngonga Ngomo, Axel-Cyrille}},
  booktitle    = {{The Semantic Web - 20th Extended Semantic Web Conference (ESWC 2023)}},
  editor       = {{Pesquita, Catia and Jimenez-Ruiz, Ernesto and McCusker, Jamie and Faria, Daniel and Dragoni, Mauro and Dimou, Anastasia and Troncy, Raphael and Hertling, Sven}},
  keywords     = {{Neural network, Concept learning, Description logics}},
  location     = {{Hersonissos, Crete, Greece}},
  pages        = {{209–226}},
  publisher    = {{Springer International Publishing}},
  title        = {{{Neural Class Expression Synthesis}}},
  doi          = {{10.1007/978-3-031-33455-9_13}},
  volume       = {{13870}},
  year         = {{2023}},
}

@inproceedings{46575,
  author       = {{Baci, Alkid and Heindorf, Stefan}},
  booktitle    = {{Proceedings of the 32nd ACM International Conference on Information and Knowledge Management (CIKM ’23)}},
  location     = {{Birmingham, UK}},
  pages        = {{3733–3737}},
  title        = {{{Accelerating Concept Learning via Sampling}}},
  doi          = {{10.1145/3583780.3615158}},
  year         = {{2023}},
}

@inbook{33740,
  author       = {{Kouagou, N'Dah Jean and Heindorf, Stefan and Demir, Caglar and Ngonga Ngomo, Axel-Cyrille}},
  booktitle    = {{The Semantic Web}},
  isbn         = {{9783031069802}},
  issn         = {{0302-9743}},
  publisher    = {{Springer International Publishing}},
  title        = {{{Learning Concept Lengths Accelerates Concept Learning in ALC}}},
  doi          = {{10.1007/978-3-031-06981-9_14}},
  year         = {{2022}},
}

@inproceedings{34674,
  abstract     = {{Smart home systems contain plenty of features that enhance wellbeing in everyday life through artificial intelligence (AI). However, many users feel insecure because they do not understand the AI’s functionality and do not feel they are in control of it. Combining technical, psychological and philosophical views on AI, we rethink smart homes as interactive systems where users can partake in an intelligent agent’s learning. Parallel to the goals of explainable AI (XAI), we explored the possibility of user involvement in supervised learning of the smart home to have a first approach to improve acceptance, support subjective understanding and increase perceived control. In this work, we conducted two studies: In an online pre-study, we asked participants about their attitude towards teaching AI via a questionnaire. In the main study, we performed a Wizard of Oz laboratory experiment with human participants, where participants spent time in a prototypical smart home and taught activity recognition to the intelligent agent through supervised learning based on the user’s behaviour. We found that involvement in the AI’s learning phase enhanced the users’ feeling of control, perceived understanding and perceived usefulness of AI in general. The participants reported positive attitudes towards training a smart home AI and found the process understandable and controllable. We suggest that involving the user in the learning phase could lead to better personalisation and increased understanding and control by users of intelligent agents for smart home automation.}},
  author       = {{Sieger, Leonie Nora and Hermann, Julia and Schomäcker, Astrid and Heindorf, Stefan and Meske, Christian and Hey, Celine-Chiara and Doğangün, Ayşegül}},
  booktitle    = {{International Conference on Human-Agent Interaction}},
  keywords     = {{human-agent interaction, smart homes, supervised learning, participation}},
  location     = {{Christchurch, New Zealand}},
  publisher    = {{ACM}},
  title        = {{{User Involvement in Training Smart Home Agents}}},
  doi          = {{10.1145/3527188.3561914}},
  year         = {{2022}},
}

@inproceedings{33739,
  abstract     = {{At least 5% of questions submitted to search engines ask about cause-effect relationships in some way. To support the development of tailored approaches that can answer such questions, we construct Webis-CausalQA-22, a benchmark corpus of 1.1 million causal questions with answers. We distinguish different types of causal questions using a novel typology derived from a data-driven, manual analysis of questions from ten large question answering (QA) datasets. Using high-precision lexical rules, we extract causal questions of each type from these datasets to create our corpus. As an initial baseline, the state-of-the-art QA model UnifiedQA achieves a ROUGE-L F1 score of 0.48 on our new benchmark.}},
  author       = {{Bondarenko, Alexander and Wolska, Magdalena and Heindorf, Stefan and Blübaum, Lukas and Ngonga Ngomo, Axel-Cyrille and Stein, Benno and Braslavski, Pavel and Hagen, Matthias and Potthast, Martin}},
  booktitle    = {{Proceedings of the 29th International Conference on Computational Linguistics}},
  pages        = {{3296–3308}},
  publisher    = {{International Committee on Computational Linguistics}},
  title        = {{{CausalQA: A Benchmark for Causal Question Answering}}},
  year         = {{2022}},
}

