@inbook{61323,
  author       = {{Wrede, Britta and Buschmeier, Hendrik and Rohlfing, Katharina J. and Booshehri, Meisam and Grimminger, Angela}},
  booktitle    = {{Social Explainable AI}},
  editor       = {{Rohlfing, Katharina J. and Främling, Kary and Alpsancar, Suzana and Thommes, Kirsten and Lim, Brian Y.}},
  pages        = {{227--245}},
  publisher    = {{Springer}},
  title        = {{{Incremental communication}}},
  doi          = {{10.1007/978-981-96-5290-7_12}},
  year         = {{2026}},
}

@inbook{61324,
  author       = {{Wagner, Petra and Kopp, Stefan}},
  booktitle    = {{Social Explainable AI}},
  editor       = {{Rohlfing, Katharina J. and Främling, Kary and Alpsancar, Suzana and Thommes, Kirsten and Lim, Brian Y.}},
  pages        = {{433--446}},
  publisher    = {{Springer}},
  title        = {{{Timing and synchronization of multimodal signals in explanations}}},
  doi          = {{10.1007/978-981-96-5290-7_22}},
  year         = {{2026}},
}

@inbook{61112,
  author       = {{Rohlfing, Katharina J. and Vollmer, Anna-Lisa and Grimminger, Angela}},
  booktitle    = {{Social Explainable AI}},
  editor       = {{Rohlfing, Katharina J. and Främling, Kary and Alpsancar, Suzana and Thommes, Kirsten and Lim, Brian Y.}},
  publisher    = {{Springer}},
  title        = {{{Practices: How to establish an explaining practice}}},
  doi          = {{10.1007/978-981-96-5290-7_5}},
  year         = {{2026}},
}

@inbook{61325,
  author       = {{Vollmer, Anna-Lisa and Buhl, Heike M. and Alami, Rachid and Främling, Kary and Grimminger, Angela and Booshehri, Meisam and Ngonga Ngomo, Axel-Cyrille}},
  booktitle    = {{Social Explainable AI}},
  editor       = {{Rohlfing, Katharina J. and Främling, Kary and Alpsancar, Suzana and Thommes, Kirsten and Lim, Brian Y.}},
  pages        = {{39--53}},
  publisher    = {{Springer}},
  title        = {{{Components of an explanation for co-constructive sXAI}}},
  doi          = {{10.1007/978-981-96-5290-7_3}},
  year         = {{2026}},
}

@inbook{65084,
  author       = {{Buhl, Heike M. and Vollmer, Anna-Lisa and Alami, Rachid and Booshehri, Meisam and Främling, Kary}},
  booktitle    = {{Social Explainable AI}},
  editor       = {{Rohlfing, Katharina J. and Främling, Kary and Alpsancar, Suzana and Thommes, Kirsten and Lim, Brian Y.}},
  pages        = {{269--295}},
  publisher    = {{Springer}},
  title        = {{{Models of the situation, the explanandum, and the interaction partner}}},
  doi          = {{10.1007/978-981-96-5290-7_14}},
  year         = {{2026}},
}

@inbook{65083,
  author       = {{Buhl, Heike M. and Wrede, Britta and Fisher, Josephine Beryl and Matarese, Marco}},
  booktitle    = {{Social Explainable AI}},
  editor       = {{Rohlfing, Katharina J. and Främling, Kary and Alpsancar, Suzana and Thommes, Kirsten and Lim, Brian Y.}},
  pages        = {{247--267}},
  publisher    = {{Springer}},
  title        = {{{Adaptation}}},
  doi          = {{10.1007/978-981-96-5290-7_13}},
  year         = {{2026}},
}

@unpublished{61151,
  abstract     = {{In this paper, we discuss the application of retrospective video recall for the assessment of cognitive processes in explanatory interactions, such as understanding and mental models. Our purpose is to reflect on the benefits and limitations of video recall compared to another self-report method, ‘thinking-aloud’. To do so, we reveal empirical results from the application of video recall in three interdisciplinary research projects that applied the method for the qualitative and quantitative assessment of cognitive and behavioral phenomena in everyday explanations. In all three projects, video recall was applied as a post-hoc procedure following the recording of dyadic face-to-face explanations of board games. The design of the video recall procedure differed between individual projects because they pursued different research objectives – that is the investigation of (1) an interlocutor's multimodal signals of understanding, (2) the change in assumptions about an interlocutor's dispositional and situational knowledge, and (3) the differentiated assessment of an interlocutor's developing understanding of domain knowledge aspects by distinguishing between mechanistic and functional explanatory stances. By discussing the benefits and the limitations of each procedure, this article provides critical reflections on video recall as a versatile research method applied for the analysis of human multimodal behavior in interaction and cognitive processing.}},
  author       = {{Lazarov, Stefan Teodorov and Schaffer, Michael and Gladow, Viviane and Buschmeier, Hendrik and Buhl, Heike M. and Grimminger, Angela}},
  pages        = {{29}},
  title        = {{{Retrospective video recall for analyzing cognitive processes in naturalistic explanations}}},
  year         = {{2026}},
}

@book{61178,
  editor       = {{Ilinykh, Nikolai and Robrecht, Amelie and Kopp, Stefan and Buschmeier, Hendrik}},
  issn         = {{2308-2275}},
  location     = {{Bielefeld, Germany}},
  pages        = {{271+viii}},
  title        = {{{SemDial 2025 – Bialogue. Proceedings of the 29th Workshop on the Semantics and Pragmatics of Dialogue}}},
  year         = {{2025}},
}

@inproceedings{61243,
  author       = {{Fisher, Josephine Beryl and Terfloth, Lutz}},
  booktitle    = {{Proceedings of the 29th Workshop on the Semantics and Pragmatics of Dialogue (SemDial 2025)}},
  title        = {{{The Dual Nature as a Local Context to Explore Verbal Behaviour in Game Explanations}}},
  year         = {{2025}},
}

@techreport{61332,
  author       = {{Buhl, Heike M. and Fisher, Josephine Beryl and Rohlfing, Katharina J.}},
  publisher    = {{OSF}},
  title        = {{{Role Perception Questionnaire: Co-construction. Scales manual}}},
  year         = {{2025}},
}

@techreport{61433,
  author       = {{Buhl, Heike M. and Herrmann, Paula and Bolinger, Dean X.}},
  publisher    = {{OSF}},
  title        = {{{TRR 318, Project A01, WP 2.1. Scales manual}}},
  year         = {{2025}},
}

@techreport{61434,
  author       = {{Buhl, Heike M. and Herrmann, Paula and Bolinger, Dean X.}},
  publisher    = {{OSF}},
  title        = {{{TRR 318, Project A01, WP 2.2. Scales manual}}},
  year         = {{2025}},
}

@article{57531,
  author       = {{Alpsancar, Suzana and Buhl, Heike M. and Matzner, Tobias and Scharlau, Ingrid}},
  journal      = {{AI and Ethics}},
  pages        = {{3015--3033}},
  publisher    = {{Springer}},
  title        = {{{Explanation needs and ethical demands: unpacking the instrumental value of XAI}}},
  doi          = {{10.1007/s43681-024-00622-3}},
  volume       = {{5}},
  year         = {{2025}},
}

@article{61156,
  abstract     = {{Explainability has become an important topic in computer science and artificial intelligence, leading to a subfield called Explainable Artificial Intelligence (XAI). The goal of providing or seeking explanations is to achieve (better) ‘understanding’ on the part of the explainee. However, what it means to ‘understand’ is still not clearly defined, and the concept itself is rarely the subject of scientific investigation. This conceptual article aims to present a model of forms of understanding for XAI-explanations and beyond. From an interdisciplinary perspective bringing together computer science, linguistics, sociology, philosophy and psychology, a definition of understanding and its forms, assessment, and dynamics during the process of giving everyday explanations are explored. Two types of understanding are considered as possible outcomes of explanations, namely enabledness, ‘knowing how’ to do or decide something, and comprehension, ‘knowing that’ – both in different degrees (from shallow to deep). Explanations regularly start with shallow understanding in a specific domain and can lead to deep comprehension and enabledness of the explanandum, which we see as a prerequisite for human users to gain agency. In this process, the increase of comprehension and enabledness are highly interdependent. Against the background of this systematization, special challenges of understanding in XAI are discussed.}},
  author       = {{Buschmeier, Hendrik and Buhl, Heike M. and Kern, Friederike and Grimminger, Angela and Beierling, Helen and Fisher, Josephine Beryl and Groß, André and Horwath, Ilona and Klowait, Nils and Lazarov, Stefan Teodorov and Lenke, Michael and Lohmer, Vivien and Rohlfing, Katharina J. and Scharlau, Ingrid and Singh, Amit and Terfloth, Lutz and Vollmer, Anna-Lisa and Wang, Yu and Wilmes, Annedore and Wrede, Britta}},
  journal      = {{Cognitive Systems Research}},
  keywords     = {{understanding, explaining, explanations, explainable, AI, interdisciplinarity, comprehension, enabledness, agency}},
  title        = {{{Forms of Understanding for XAI-Explanations}}},
  doi          = {{10.1016/j.cogsys.2025.101419}},
  volume       = {{94}},
  year         = {{2025}},
}

@unpublished{55154,
  abstract     = {{In human interaction, gestures serve various functions such as marking speech rhythm, highlighting key elements, and supplementing information. These gestures are also observed in explanatory contexts. However, the impact of gestures on explanations provided by virtual agents remains underexplored. A user study was carried out to investigate how different types of gestures influence perceived interaction quality and listener understanding. This study addresses the effect of gestures in explanation by developing an embodied virtual explainer integrating both beat gestures and iconic gestures to enhance its automatically generated verbal explanations. Our model combines beat gestures generated by a learned speech-driven synthesis module with manually captured iconic gestures, supporting the agent's verbal expressions about the board game Quarto! as an explanation scenario. Findings indicate that neither the use of iconic gestures alone nor their combination with beat gestures outperforms the baseline or beat-only conditions in terms of understanding. Nonetheless, compared to prior research, the embodied agent significantly enhances understanding.}},
  author       = {{Robrecht, Amelie and Voss, Hendric and Gottschalk, Lisa and Kopp, Stefan}},
  note         = {{arXiv:2406.12544}},
  title        = {{{Integrating Representational Gestures into Automatically Generated Embodied Explanations and its Effects on Understanding and Interaction Quality}}},
  year         = {{2024}},
}

@article{56264,
  author       = {{Buhl, Heike M. and Fisher, Josephine Beryl and Rohlfing, Katharina J.}},
  journal      = {{Proceedings of the Annual Meeting of the Cognitive Science Society}},
  title        = {{{Changes in partner models – Effects of adaptivity in the course of explanations}}},
  volume       = {{46}},
  year         = {{2024}},
}

@article{58708,
  abstract     = {{Research about explanation processes is gaining relevance because of the increased popularity of artificial systems required to explain their function or outcome. Following an interactive approach, not only explainers, but also explainees contribute to successful interactions. However, little is known about how explainees actively guide explanation processes and how their involvement relates to learning. We explored the occurrence and type of explainees’ questions in 20 adult–adult explanation dialogues about unknown present and absent objects. Crucially, we related the question types to the explainees’ subsequent recall of the unknown object labels. We found that explainees asked different types of questions, especially about the object’s label and facts. Questions about the object’s function were asked more when objects were present. In addition, requests for labelling were linked to better recall. The results contribute to designing explainable AI that aims to provide relevant and adaptive explanations and to further experimental approaches to study explanations.}},
  author       = {{Fisher, Josephine Beryl and Rohlfing, Katharina J. and Donnellan, Ed and Grimminger, Angela and Gu, Yan and Vigliocco, Gabriella}},
  journal      = {{Interaction Studies}},
  number       = {{2}},
  pages        = {{244--255}},
  publisher    = {{John Benjamins}},
  title        = {{{Explain with, rather than explain to: How explainees shape their own learning}}},
  doi          = {{10.1075/is.23019.fis}},
  volume       = {{25}},
  year         = {{2024}},
}

@inproceedings{55152,
  author       = {{Robrecht, Amelie and Rothgänger, Markus and Kopp, Stefan}},
  booktitle    = {{Proceedings of the 23rd ACM International Conference on Intelligent Virtual Agents}},
  publisher    = {{ACM}},
  title        = {{{A Study on the Benefits and Drawbacks of Adaptivity in AI-generated Explanations}}},
  doi          = {{10.1145/3570945.3607339}},
  year         = {{2023}},
}

@inproceedings{51367,
  author       = {{Robrecht, Amelie and Kopp, Stefan}},
  booktitle    = {{Proceedings of the 15th International Conference on Agents and Artificial Intelligence}},
  isbn         = {{978-989-758-623-1}},
  location     = {{Lisbon}},
  pages        = {{48--58}},
  publisher    = {{SCITEPRESS - Science and Technology Publications}},
  title        = {{{SNAPE: A Sequential Non-Stationary Decision Process Model for Adaptive Explanation Generation}}},
  doi          = {{10.5220/0011671300003393}},
  year         = {{2023}},
}

