@inproceedings{64872,
  author       = {{Buhl, Heike M. and Fisher, Josephine Beryl and Rohlfing, Katharina J.}},
  booktitle    = {{Proceedings of the 3rd TRR 318 Conference: Contextualizing Explanations}},
  editor       = {{Cimiano, Philipp and Paassen, Benjamin and Vollmer, Anna-Lisa}},
  publisher    = {{Bielefeld University Press}},
  title        = {{{Cognitive and Interactive Adaptivity to the Explainee in an Explanatory Dialogue: An Experimental Study}}},
  doi          = {{10.64136/gumb4700}},
  year         = {{2026}},
}

@inbook{65083,
  author       = {{Buhl, Heike M. and Wrede, Britta and Fisher, Josephine Beryl and Matarese, Marco}},
  booktitle    = {{Social Explainable AI}},
  editor       = {{Rohlfing, Katharina J. and Främling, Kary and Lim, Brian and Alpsancar, Suzana and Thommes, Kirsten}},
  pages        = {{247--267}},
  publisher    = {{Springer}},
  title        = {{{Adaptation}}},
  doi          = {{10.1007/978-981-96-5290-7_13}},
  year         = {{2026}},
}

@inproceedings{61243,
  author       = {{Fisher, Josephine Beryl and Terfloth, Lutz}},
  booktitle    = {{Proceedings of the 29th Workshop on the Semantics and Pragmatics of Dialogue (SemDial 2025)}},
  title        = {{{The Dual Nature as a Local Context to Explore Verbal Behaviour in Game Explanations}}},
  year         = {{2025}},
}

@techreport{61332,
  author       = {{Buhl, Heike M. and Fisher, Josephine Beryl and Rohlfing, Katharina J.}},
  institution  = {{OSF}},
  title        = {{{Role Perception Questionnaire: Co-construction. Scales manual}}},
  year         = {{2025}},
}

@article{61156,
  abstract     = {{Explainability has become an important topic in computer science and artificial intelligence, leading to a subfield called Explainable Artificial Intelligence (XAI). The goal of providing or seeking explanations is to achieve (better) ‘understanding’ on the part of the explainee. However, what it means to ‘understand’ is still not clearly defined, and the concept itself is rarely the subject of scientific investigation. This conceptual article aims to present a model of forms of understanding for XAI-explanations and beyond. From an interdisciplinary perspective bringing together computer science, linguistics, sociology, philosophy and psychology, a definition of understanding and its forms, assessment, and dynamics during the process of giving everyday explanations are explored. Two types of understanding are considered as possible outcomes of explanations, namely enabledness, ‘knowing how’ to do or decide something, and comprehension, ‘knowing that’ – both in different degrees (from shallow to deep). Explanations regularly start with shallow understanding in a specific domain and can lead to deep comprehension and enabledness of the explanandum, which we see as a prerequisite for human users to gain agency. In this process, the increase of comprehension and enabledness are highly interdependent. Against the background of this systematization, special challenges of understanding in XAI are discussed.}},
  author       = {{Buschmeier, Hendrik and Buhl, Heike M. and Kern, Friederike and Grimminger, Angela and Beierling, Helen and Fisher, Josephine Beryl and Groß, André and Horwath, Ilona and Klowait, Nils and Lazarov, Stefan Teodorov and Lenke, Michael and Lohmer, Vivien and Rohlfing, Katharina J. and Scharlau, Ingrid and Singh, Amit and Terfloth, Lutz and Vollmer, Anna-Lisa and Wang, Yu and Wilmes, Annedore and Wrede, Britta}},
  journal      = {{Cognitive Systems Research}},
  keywords     = {{understanding, explaining, explanations, explainable, AI, interdisciplinarity, comprehension, enabledness, agency}},
  title        = {{{Forms of Understanding for XAI-Explanations}}},
  doi          = {{10.1016/j.cogsys.2025.101419}},
  volume       = {{94}},
  year         = {{2025}},
}

@article{56264,
  author       = {{Buhl, Heike M. and Fisher, Josephine Beryl and Rohlfing, Katharina J.}},
  journal      = {{Proceedings of the Annual Meeting of the Cognitive Science Society}},
  title        = {{{Changes in partner models – Effects of adaptivity in the course of explanations}}},
  volume       = {{46}},
  year         = {{2024}},
}

@article{58708,
  abstract     = {{Research about explanation processes is gaining relevance because of the increased popularity of artificial systems required to explain their function or outcome. Following an interactive approach, not only explainers, but also explainees contribute to successful interactions. However, little is known about how explainees actively guide explanation processes and how their involvement relates to learning. We explored the occurrence and type of explainees’ questions in 20 adult — adult explanation dialogues about unknown present and absent objects. Crucially, we related the question types to the explainees’ subsequent recall of the unknown object labels. We found that explainees asked different types of questions, especially about the object’s label and facts. Questions about the object’s function were asked more when objects were present. In addition, requests for labelling were linked to better recall. The results contribute to designing explainable AI that aims to provide relevant and adaptive explanations and to further experimental approaches to study explanations.}},
  author       = {{Fisher, Josephine Beryl and Rohlfing, Katharina J. and Donnellan, Ed and Grimminger, Angela and Gu, Yan and Vigliocco, Gabriella}},
  journal      = {{Interaction Studies}},
  number       = {{2}},
  pages        = {{244--255}},
  publisher    = {{John Benjamins}},
  title        = {{{Explain with, rather than explain to: How explainees shape their own learning}}},
  doi          = {{10.1075/is.23019.fis}},
  volume       = {{25}},
  year         = {{2024}},
}

@inproceedings{55156,
  author       = {{Fisher, Josephine Beryl and Robrecht, Amelie and Kopp, Stefan and Rohlfing, Katharina J.}},
  booktitle    = {{Proceedings of the 27th Workshop on the Semantics and Pragmatics of Dialogue}},
  location     = {{Maribor}},
  title        = {{{Exploring the Semantic Dialogue Patterns of Explanations – a Case Study of Game Explanations}}},
  year         = {{2023}},
}

@article{51349,
  abstract     = {{Recent approaches to Explainable AI (XAI) promise to satisfy diverse user expectations by allowing them to steer the interaction in order to elicit content relevant to them. However, little is known about how and to what extent the explainee takes part actively in the process of explaining. To tackle this empirical gap, we exploratively examined naturally occurring everyday explanations in doctor–patient interactions (N = 11). Following the social design of XAI, we view explanations as emerging in interactions: first, we identified the verbal behavior of both the explainer and the explainee in the sequential context, which we could assign to phases that were either monological or dialogical; second, we investigated in particular who was responsible for the initiation of the different phases. Finally, we took a closer look at the global conversational structure of explanations by applying a context-sensitive model of organizational jobs, thus adding a third layer of analysis. Results show that in our small sample of conversational explanations, both monological and dialogical phases varied in their length, timing of occurrence (at the early or later stages of the interaction) and their initiation (by the explainer or the explainee). They alternated several times in the course of the interaction. However, we also found some patterns suggesting that all interactions started with a monological phase initiated by the explainer. Both conversational partners contributed to the core organizational job that constitutes an explanation. We interpret the results as an indication for naturally occurring everyday explanations in doctor–patient interactions to be co-constructed on three levels of linguistic description: (1) by switching back and forth between monological to dialogical phases that (2) can be initiated by both partners and (3) by the mutual accomplishment and thus responsibility for an explanation’s core job that is crucial for the success of the explanation. Because of the explorative nature of our study, these results need to be investigated (a) with a larger sample and (b) in other contexts. However, our results suggest that future designs of artificial explainable systems should design the explanatory dialogue in such a way that it includes monological and dialogical phases that can be initiated not only by the explainer but also by the explainee, as both contribute to the core job of explicating procedural, clausal, or conceptual relations in explanations.}},
  author       = {{Fisher, Josephine Beryl and Lohmer, Vivien and Kern, Friederike and Barthlen, Winfried and Gaus, Sebastian and Rohlfing, Katharina J.}},
  issn         = {{0933-1875}},
  journal      = {{KI - Künstliche Intelligenz}},
  keywords     = {{Artificial Intelligence}},
  number       = {{3-4}},
  pages        = {{317--326}},
  publisher    = {{Springer Science and Business Media LLC}},
  title        = {{{Exploring monological and dialogical phases in naturally occurring explanations}}},
  doi          = {{10.1007/s13218-022-00787-1}},
  volume       = {{36}},
  year         = {{2022}},
}

