@inbook{61325,
  author       = {{Vollmer, Anna-Lisa and Buhl, Heike M. and Alami, Rachid and Främling, Kary and Grimminger, Angela and Booshehri, Meisam and Ngonga Ngomo, Axel-Cyrille}},
  booktitle    = {{Social Explainable AI}},
  editor       = {{Rohlfing, Katharina J. and Främling, Kary and Lim, Brian and Alpsancar, Suzana and Thommes, Kirsten}},
  pages        = {{39--53}},
  publisher    = {{Springer}},
  title        = {{{Components of an explanation for co-constructive sXAI}}},
  doi          = {{10.1007/978-981-96-5290-7_3}},
  year         = {{2026}},
}

@inbook{65084,
  author       = {{Buhl, Heike M. and Vollmer, Anna-Lisa and Alami, Rachid and Booshehri, Meisam and Främling, Kary}},
  booktitle    = {{Social Explainable AI}},
  editor       = {{Rohlfing, Katharina J. and Främling, Kary and Lim, Brian and Alpsancar, Suzana and Thommes, Kirsten}},
  pages        = {{269--295}},
  publisher    = {{Springer}},
  title        = {{{Models of the situation, the explanandum, and the interaction partner}}},
  doi          = {{10.1007/978-981-96-5290-7_14}},
  year         = {{2026}},
}

@inbook{65083,
  author       = {{Buhl, Heike M. and Wrede, Britta and Fisher, Josephine Beryl and Matarese, Marco}},
  booktitle    = {{Social Explainable AI}},
  editor       = {{Rohlfing, Katharina J. and Främling, Kary and Lim, Brian and Alpsancar, Suzana and Thommes, Kirsten}},
  pages        = {{247--267}},
  publisher    = {{Springer}},
  title        = {{{Adaptation}}},
  doi          = {{10.1007/978-981-96-5290-7_13}},
  year         = {{2026}},
}

@techreport{65180,
  author       = {{Terfloth, Lutz and Buhl, Heike M. and Lohmer, Vivien and Schaffer, Michael and Kern, Friederike and Schulte, Carsten}},
  title        = {{{Bridging the Dual Nature: How Integrated Explanations Enhance Understanding of Technical Artifacts}}},
  year         = {{2026}},
}

@unpublished{61151,
  abstract     = {{In this paper, we discuss the application of retrospective video recall for the assessment of cognitive processes in explanatory interactions, such as understanding and mental models. Our purpose is to reflect on the benefits and limitations of video recall compared to another self-report method, ‘thinking-aloud’. To do so, we report empirical results from the application of video recall in three interdisciplinary research projects that applied the method for the qualitative and quantitative assessment of cognitive and behavioral phenomena in everyday explanations. In all three projects, video recall was applied as a post-hoc procedure following the recording of dyadic face-to-face explanations of board games. The design of the video recall procedure differed between individual projects because they pursued different research objectives, that is, the investigation of (1) an interlocutor's multimodal signals of understanding, (2) the change in assumptions about an interlocutor's dispositional and situational knowledge, and (3) the differentiated assessment of an interlocutor's developing understanding of domain knowledge aspects by distinguishing between mechanistic and functional explanatory stances. By discussing the benefits and limitations of each procedure, this article provides critical reflections on video recall as a versatile research method for the analysis of human multimodal behavior in interaction and cognitive processing.}},
  author       = {{Lazarov, Stefan Teodorov and Schaffer, Michael and Gladow, Viviane and Buschmeier, Hendrik and Buhl, Heike M. and Grimminger, Angela}},
  pages        = {{29}},
  title        = {{{Retrospective video recall for analyzing cognitive processes in naturalistic explanations}}},
  year         = {{2026}},
}

@unpublished{60718,
  abstract     = {{The ability to generate explanations that are understood by explainees is the
quintessence of explainable artificial intelligence. Since understanding
depends on the explainee's background and needs, recent research focused on
co-constructive explanation dialogues, where an explainer continuously monitors
the explainee's understanding and adapts their explanations dynamically. We
investigate the ability of large language models (LLMs) to engage as explainers
in co-constructive explanation dialogues. In particular, we present a user
study in which explainees interact with an LLM in two settings, one of which
involves the LLM being instructed to explain a topic co-constructively. We
evaluate the explainees' understanding before and after the dialogue, as well
as their perception of the LLMs' co-constructive behavior. Our results suggest
that LLMs show some co-constructive behaviors, such as asking verification
questions, that foster the explainees' engagement and can improve understanding
of a topic. However, their ability to effectively monitor the current
understanding and scaffold the explanations accordingly remains limited.}},
  author       = {{Fichtel, Leandra and Spliethöver, Maximilian and Hüllermeier, Eyke and Jimenez, Patricia and Klowait, Nils and Kopp, Stefan and Ngonga Ngomo, Axel-Cyrille and Robrecht, Amelie and Scharlau, Ingrid and Terfloth, Lutz and Vollmer, Anna-Lisa and Wachsmuth, Henning}},
  booktitle    = {{arXiv:2504.18483}},
  pages        = {{20}},
  title        = {{{Investigating Co-Constructive Behavior of Large Language Models in Explanation Dialogues}}},
  year         = {{2025}},
}

@inproceedings{61234,
  abstract     = {{The ability to generate explanations that are understood by explainees is the
quintessence of explainable artificial intelligence. Since understanding
depends on the explainee's background and needs, recent research focused on
co-constructive explanation dialogues, where an explainer continuously monitors
the explainee's understanding and adapts their explanations dynamically. We
investigate the ability of large language models (LLMs) to engage as explainers
in co-constructive explanation dialogues. In particular, we present a user
study in which explainees interact with an LLM in two settings, one of which
involves the LLM being instructed to explain a topic co-constructively. We
evaluate the explainees' understanding before and after the dialogue, as well
as their perception of the LLMs' co-constructive behavior. Our results suggest
that LLMs show some co-constructive behaviors, such as asking verification
questions, that foster the explainees' engagement and can improve understanding
of a topic. However, their ability to effectively monitor the current
understanding and scaffold the explanations accordingly remains limited.}},
  author       = {{Fichtel, Leandra and Spliethöver, Maximilian and Hüllermeier, Eyke and Jimenez, Patricia and Klowait, Nils and Kopp, Stefan and Ngonga Ngomo, Axel-Cyrille and Robrecht, Amelie and Scharlau, Ingrid and Terfloth, Lutz and Vollmer, Anna-Lisa and Wachsmuth, Henning}},
  booktitle    = {{Proceedings of the 26th Annual Meeting of the Special Interest Group on Discourse and Dialogue}},
  publisher    = {{Association for Computational Linguistics}},
  title        = {{{Investigating Co-Constructive Behavior of Large Language Models in Explanation Dialogues}}},
  year         = {{2025}},
}

@inproceedings{61243,
  author       = {{Fisher, Josephine Beryl and Terfloth, Lutz}},
  booktitle    = {{Proceedings of the 29th Workshop on the Semantics and Pragmatics of Dialogue (SemDial 2025)}},
  title        = {{{The Dual Nature as a Local Context to Explore Verbal Behaviour in Game Explanations}}},
  year         = {{2025}},
}

@article{57531,
  author       = {{Alpsancar, Suzana and Buhl, Heike M. and Matzner, Tobias and Scharlau, Ingrid}},
  journal      = {{AI and Ethics}},
  pages        = {{3015--3033}},
  publisher    = {{Springer}},
  title        = {{{Explanation needs and ethical demands: unpacking the instrumental value of XAI}}},
  doi          = {{10.1007/s43681-024-00622-3}},
  volume       = {{5}},
  year         = {{2025}},
}

@article{61156,
  abstract     = {{Explainability has become an important topic in computer science and artificial intelligence, leading to a subfield called Explainable Artificial Intelligence (XAI). The goal of providing or seeking explanations is to achieve (better) ‘understanding’ on the part of the explainee. However, what it means to ‘understand’ is still not clearly defined, and the concept itself is rarely the subject of scientific investigation. This conceptual article aims to present a model of forms of understanding for XAI-explanations and beyond. From an interdisciplinary perspective bringing together computer science, linguistics, sociology, philosophy and psychology, a definition of understanding as well as its forms, assessment, and dynamics during the process of giving everyday explanations is explored. Two types of understanding are considered as possible outcomes of explanations, namely enabledness, ‘knowing how’ to do or decide something, and comprehension, ‘knowing that’ – both in different degrees (from shallow to deep). Explanations regularly start with shallow understanding in a specific domain and can lead to deep comprehension and enabledness of the explanandum, which we see as a prerequisite for human users to gain agency. In this process, the increases in comprehension and enabledness are highly interdependent. Against the background of this systematization, special challenges of understanding in XAI are discussed.}},
  author       = {{Buschmeier, Hendrik and Buhl, Heike M. and Kern, Friederike and Grimminger, Angela and Beierling, Helen and Fisher, Josephine Beryl and Groß, André and Horwath, Ilona and Klowait, Nils and Lazarov, Stefan Teodorov and Lenke, Michael and Lohmer, Vivien and Rohlfing, Katharina and Scharlau, Ingrid and Singh, Amit and Terfloth, Lutz and Vollmer, Anna-Lisa and Wang, Yu and Wilmes, Annedore and Wrede, Britta}},
  journal      = {{Cognitive Systems Research}},
  keywords     = {{understanding, explaining, explanations, explainable, AI, interdisciplinarity, comprehension, enabledness, agency}},
  title        = {{{Forms of Understanding for XAI-Explanations}}},
  doi          = {{10.1016/j.cogsys.2025.101419}},
  volume       = {{94}},
  year         = {{2025}},
}

@inproceedings{57356,
  author       = {{Schaffer, Michael Erol and Terfloth, Lutz and Schulte, Carsten and Buhl, Heike M.}},
  location     = {{Valletta, Malta}},
  title        = {{{Perception and Consideration of the Explainees’ Needs for Satisfying Explanations}}},
  year         = {{2024}},
}

@inproceedings{57357,
  author       = {{Schaffer, Michael Erol and Terfloth, Lutz and Schulte, Carsten and Buhl, Heike M.}},
  booktitle    = {{Joint Proceedings of the xAI-2024 Late-breaking Work, Demos and Doctoral Consortium, Vol. 3793}},
  title        = {{{Explainers’ Mental Representations of Explainees’ Needs in Everyday Explanations}}},
  year         = {{2024}},
}

@inproceedings{56994,
  author       = {{Schaffer, Michael and Buhl, Heike M.}},
  booktitle    = {{53rd DGPs Congress - Abstracts}},
  editor       = {{Ansorge, Ulrich and Szaszkó, Bence and Werner, Lena}},
  location     = {{Vienna}},
  title        = {{{The mental representation of the object of explanation in the process of co-constructive explanations}}},
  year         = {{2024}},
}

@inproceedings{61403,
  author       = {{Lohmer, Vivien and Kern, Friederike}},
  booktitle    = {{Second International Multimodal Communication Symposium (MMSYM) - Book of Abstracts}},
  keywords     = {{gesture, explanations, conversation analysis}},
  location     = {{Goethe-Universität Frankfurt, Germany}},
  title        = {{{The role of interactive gestures in explanatory interactions}}},
  year         = {{2024}},
}

@inproceedings{57058,
  author       = {{Lazarov, Stefan Teodorov and Schaffer, Michael and Ronoh, Erick Kiprop}},
  location     = {{Paderborn}},
  title        = {{{Approaches of Assessing Understanding Using Video-Recall Data}}},
  year         = {{2023}},
}

@inproceedings{57056,
  author       = {{Schaffer, Michael and Buhl, Heike M.}},
  location     = {{Kiel}},
  title        = {{{Erklärungsverläufe und -inhalte aus Sicht Erklärender - eine qualitative Studie}}},
  year         = {{2023}},
}

@inproceedings{47448,
  abstract     = {{In XAI it is important to consider that, in contrast to explanations for professional audiences, one cannot assume common expertise when explaining for laypeople. But such explanations between humans vary greatly, making it difficult to research commonalities across explanations. We used the dual nature theory, a techno-philosophical approach, to cope with these challenges. According to it, one can explain, for example, an XAI's decision by addressing its dual nature: by focusing on the Architecture (e.g., the logic of its algorithms) or the Relevance (e.g., the severity of a decision, the implications of a recommendation). We investigated 20 game explanations using the theory as an analytical framework. We elaborate on how we used the theory to quickly structure and compare explanations of technological artifacts. We supplemented results from analyzing the explanation contents with results from a video recall to explore how explainers justified their explanations. We found that explainers focused first on the physical aspects of the game (Architecture) and only later on aspects of the Relevance. Reasoning in the video recalls indicated that explainers regarded the initial focus on the Architecture as important for structuring the explanation: explaining the basic components before moving on to more complex, intangible aspects. Shifting between addressing the two sides was justified by explanation goals, emerging misunderstandings, and the knowledge needs of the explainee. We discovered several commonalities that inspire future research questions which, if further generalizable, provide initial ideas for the construction of synthetic explanations.}},
  author       = {{Terfloth, Lutz and Schaffer, Michael and Buhl, Heike M. and Schulte, Carsten}},
  isbn         = {{978-3-031-44069-4}},
  location     = {{Lisbon}},
  publisher    = {{Springer, Cham}},
  title        = {{{Adding Why to What? Analyses of an Everyday Explanation}}},
  doi          = {{10.1007/978-3-031-44070-0_13}},
  year         = {{2023}},
}

@inproceedings{61402,
  author       = {{Lohmer, Vivien and Terfloth, Lutz and Kern, Friederike}},
  booktitle    = {{First International Multimodal Communication Symposium - Book of Abstracts}},
  keywords     = {{gesture, dual nature, explanations, architecture, relevance}},
  location     = {{Universitat Pompeu Fabra, Barcelona}},
  title        = {{{Explaining the Technical Artifact Quarto!: How Gestures are used in Everyday Explanations}}},
  year         = {{2023}},
}

@inproceedings{56993,
  author       = {{Schaffer, Michael and Budde, Lea and Schulte, Carsten and Buhl, Heike M.}},
  booktitle    = {{52nd DGPs Congress - Abstracts}},
  editor       = {{Bermeitinger, Christina and Greve, Werner}},
  keywords     = {{Cognition, Motivation, Technical Model, Mental Model, Explainer, Explainee, Qualitative Content Analysis}},
  location     = {{Hildesheim}},
  title        = {{{Die Anpassungen von Erklärungen an das Verständnis des Erklärgegenstandes der Gesprächspartner}}},
  year         = {{2022}},
}

@article{51349,
  abstract     = {{Recent approaches to Explainable AI (XAI) promise to satisfy diverse user expectations by allowing them to steer the interaction in order to elicit content relevant to them. However, little is known about how and to what extent the explainee takes part actively in the process of explaining. To tackle this empirical gap, we exploratively examined naturally occurring everyday explanations in doctor–patient interactions (N = 11). Following the social design of XAI, we view explanations as emerging in interactions: first, we identified the verbal behavior of both the explainer and the explainee in the sequential context, which we could assign to phases that were either monological or dialogical; second, we investigated in particular who was responsible for the initiation of the different phases. Finally, we took a closer look at the global conversational structure of explanations by applying a context-sensitive model of organizational jobs, thus adding a third layer of analysis. Results show that in our small sample of conversational explanations, both monological and dialogical phases varied in their length, timing of occurrence (at the early or later stages of the interaction) and their initiation (by the explainer or the explainee). They alternated several times in the course of the interaction. However, we also found some patterns suggesting that all interactions started with a monological phase initiated by the explainer. Both conversational partners contributed to the core organizational job that constitutes an explanation. We interpret the results as an indication that naturally occurring everyday explanations in doctor–patient interactions are co-constructed on three levels of linguistic description: (1) by switching back and forth between monological and dialogical phases that (2) can be initiated by both partners and (3) by the mutual accomplishment and thus responsibility for an explanation’s core job that is crucial for the success of the explanation. Because of the explorative nature of our study, these results need to be investigated (a) with a larger sample and (b) in other contexts. However, our results suggest that future designs of artificial explainable systems should design the explanatory dialogue in such a way that it includes monological and dialogical phases that can be initiated not only by the explainer but also by the explainee, as both contribute to the core job of explicating procedural, clausal, or conceptual relations in explanations.}},
  author       = {{Fisher, Josephine Beryl and Lohmer, Vivien and Kern, Friederike and Barthlen, Winfried and Gaus, Sebastian and Rohlfing, Katharina}},
  issn         = {{0933-1875}},
  journal      = {{KI - Künstliche Intelligenz}},
  keywords     = {{Artificial Intelligence}},
  number       = {{3-4}},
  pages        = {{317--326}},
  publisher    = {{Springer Science and Business Media LLC}},
  title        = {{{Exploring monological and dialogical phases in naturally occurring explanations}}},
  doi          = {{10.1007/s13218-022-00787-1}},
  volume       = {{36}},
  year         = {{2022}},
}

