@article{54450,
  abstract     = {{In the last decade, there has been increasing interest in allowing users to understand how the predictions of machine-learned models come about, thus increasing transparency and empowering them to understand and potentially contest those decisions. Dialogue-based approaches, in contrast to traditional one-shot eXplainable Artificial Intelligence (XAI) methods, facilitate interactive, in-depth exploration through multi-turn dialogues, simulating expert conversations. This paper reviews the current state of dialogue-based XAI, presenting a systematic review of 1,339 publications, narrowed down to 14 based on inclusion criteria. We explore the theoretical foundations of these systems, propose key dimensions along which dialogue-based XAI solutions differ, and identify key use cases, target audiences, system components, and the types of supported queries and responses. Furthermore, we investigate the current paradigms by which systems are evaluated and highlight their key limitations. Key findings include the identification of the main use cases, objectives, and audiences targeted by dialogue-based XAI methods, and a summary of the main types of questions and information needs. Beyond discussing avenues for future work, we present a meta-architecture for these systems derived from the existing literature and outline prevalent theoretical frameworks.}},
  author       = {{Mindlin, Dimitry and Beer, Fabian and Sieger, Leonie Nora and Heindorf, Stefan and Cimiano, Philipp and Esposito, Elena and Ngonga Ngomo, Axel-Cyrille}},
  journal      = {{Artificial Intelligence Review}},
  number       = {{3}},
  publisher    = {{Springer}},
  title        = {{{Beyond One-Shot Explanations: A Systematic Literature Review of Dialogue-Based XAI Approaches}}},
  doi          = {{10.1007/s10462-024-11007-7}},
  volume       = {{58}},
  year         = {{2025}},
}

@unpublished{37937,
  abstract     = {{Knowledge bases are widely used for information management on the web, enabling high-impact applications such as web search, question answering, and natural language processing. They also serve as the backbone for automatic decision systems, e.g., for medical diagnostics and credit scoring. As stakeholders affected by these decisions would like to understand their situation and verify that decisions are fair, a number of explanation approaches have been proposed using concepts in description logics. However, the learned concepts can become long and difficult to fathom for non-experts, even when verbalized. Moreover, long concepts do not immediately provide a clear path of action to change one's situation. Counterfactuals answering the question "How must feature values be changed to obtain a different classification?" have been proposed as short, human-friendly explanations for tabular data. In this paper, we transfer the notion of counterfactuals to description logics and propose the first algorithm for generating counterfactual explanations in the description logic $\mathcal{ELH}$. Counterfactual candidates are generated from concepts, and the candidates with the fewest feature changes are selected as counterfactuals. In case of multiple counterfactuals, we rank them according to the likelihood of their feature combinations. For evaluation, we conduct a user survey to investigate which of the generated counterfactual candidates participants prefer as explanations. In a second study, we explore possible use cases for counterfactual explanations.}},
  author       = {{Sieger, Leonie Nora and Heindorf, Stefan and Blübaum, Lukas and Ngonga Ngomo, Axel-Cyrille}},
  note         = {{arXiv:2301.05109}},
  title        = {{{Explaining ELH Concept Descriptions through Counterfactual Reasoning}}},
  year         = {{2023}},
}

@inproceedings{34674,
  abstract     = {{Smart home systems offer many features that enhance wellbeing in everyday life through artificial intelligence (AI). However, many users feel insecure because they do not understand the AI’s functionality and do not feel in control of it. Combining technical, psychological, and philosophical views on AI, we rethink smart homes as interactive systems in which users can take part in an intelligent agent’s learning. In parallel to the goals of explainable AI (XAI), we explored user involvement in the supervised learning of a smart home system as a first approach to improving acceptance, supporting subjective understanding, and increasing perceived control. We conducted two studies: in an online pre-study, we used a questionnaire to ask participants about their attitudes towards teaching AI. In the main study, we performed a Wizard of Oz laboratory experiment in which participants spent time in a prototypical smart home and taught activity recognition to the intelligent agent through supervised learning based on their own behaviour. We found that involvement in the AI’s learning phase enhanced the users’ feeling of control, perceived understanding, and perceived usefulness of AI in general. The participants reported positive attitudes towards training a smart home AI and found the process understandable and controllable. We suggest that involving users in the learning phase could lead to better personalisation and increase users’ understanding and control of intelligent agents for smart home automation.}},
  author       = {{Sieger, Leonie Nora and Hermann, Julia and Schomäcker, Astrid and Heindorf, Stefan and Meske, Christian and Hey, Celine-Chiara and Doğangün, Ayşegül}},
  booktitle    = {{International Conference on Human-Agent Interaction}},
  keywords     = {{human-agent interaction, smart homes, supervised learning, participation}},
  location     = {{Christchurch, New Zealand}},
  publisher    = {{ACM}},
  title        = {{{User Involvement in Training Smart Home Agents}}},
  doi          = {{10.1145/3527188.3561914}},
  year         = {{2022}},
}
