@inbook{61220,
  abstract     = {{This chapter presents recurring structures of interactions—and their associated goals—as they occur in explaining processes. It explores how explanations are not delivered in isolation but unfold through dynamic, structured sequences of interaction between participants. Beginning with the smallest units, we examine how individual dialog acts and multimodal signals form micro-patterns within turns. These, in turn, compose meso-level structures such as pragmatic frames that organize sequences of interaction into meaningful, goal-oriented episodes. At the macro-level, we identify common types of explanatory dialogues, such as inquiry, information-seeking, or deliberation, which are shaped by participants’ goals and situational demands. The chapter highlights how these abstract patterns of structure are instantiated differently across social and situational contexts and proposes that understanding them is crucial for designing socially intelligent and adaptive XAI systems. By analyzing how these structures emerge and function, we offer a framework for operationalizing explanation structures in a way that supports co-constructive and context-sensitive human-AI interaction.}},
  author       = {{Jimenez, Patricia and Vollmer, Anna Lisa and Wachsmuth, Henning}},
  booktitle    = {{Social Explainable AI: Communications of NII Shonan Meetings}},
  editor       = {{Rohlfing, Katharina and Främling, Kary and Lim, Brian and Alpsancar, Suzana and Thommes, Kirsten}},
  publisher    = {{Springer Singapore}},
  title        = {{{Structures Underlying Explanations}}},
  year         = {{2026}},
}

@unpublished{60718,
  abstract     = {{The ability to generate explanations that are understood by explainees is the
quintessence of explainable artificial intelligence. Since understanding
depends on the explainee's background and needs, recent research focused on
co-constructive explanation dialogues, where an explainer continuously monitors
the explainee's understanding and adapts their explanations dynamically. We
investigate the ability of large language models (LLMs) to engage as explainers
in co-constructive explanation dialogues. In particular, we present a user
study in which explainees interact with an LLM in two settings, one of which
involves the LLM being instructed to explain a topic co-constructively. We
evaluate the explainees' understanding before and after the dialogue, as well
as their perception of the LLMs' co-constructive behavior. Our results suggest
that LLMs show some co-constructive behaviors, such as asking verification
questions, that foster the explainees' engagement and can improve understanding
of a topic. However, their ability to effectively monitor the current
understanding and scaffold the explanations accordingly remains limited.}},
  author       = {{Fichtel, Leandra and Spliethöver, Maximilian and Hüllermeier, Eyke and Jimenez, Patricia and Klowait, Nils and Kopp, Stefan and Ngonga Ngomo, Axel-Cyrille and Robrecht, Amelie and Scharlau, Ingrid and Terfloth, Lutz and Vollmer, Anna-Lisa and Wachsmuth, Henning}},
  booktitle    = {{arXiv:2504.18483}},
  pages        = {{20}},
  title        = {{{Investigating Co-Constructive Behavior of Large Language Models in Explanation Dialogues}}},
  year         = {{2025}},
}

@inproceedings{61234,
  abstract     = {{The ability to generate explanations that are understood by explainees is the
quintessence of explainable artificial intelligence. Since understanding
depends on the explainee's background and needs, recent research focused on
co-constructive explanation dialogues, where an explainer continuously monitors
the explainee's understanding and adapts their explanations dynamically. We
investigate the ability of large language models (LLMs) to engage as explainers
in co-constructive explanation dialogues. In particular, we present a user
study in which explainees interact with an LLM in two settings, one of which
involves the LLM being instructed to explain a topic co-constructively. We
evaluate the explainees' understanding before and after the dialogue, as well
as their perception of the LLMs' co-constructive behavior. Our results suggest
that LLMs show some co-constructive behaviors, such as asking verification
questions, that foster the explainees' engagement and can improve understanding
of a topic. However, their ability to effectively monitor the current
understanding and scaffold the explanations accordingly remains limited.}},
  author       = {{Fichtel, Leandra and Spliethöver, Maximilian and Hüllermeier, Eyke and Jimenez, Patricia and Klowait, Nils and Kopp, Stefan and Ngonga Ngomo, Axel-Cyrille and Robrecht, Amelie and Scharlau, Ingrid and Terfloth, Lutz and Vollmer, Anna-Lisa and Wachsmuth, Henning}},
  booktitle    = {{Proceedings of the 26th Annual Meeting of the Special Interest Group on Discourse and Dialogue}},
  publisher    = {{Association for Computational Linguistics}},
  title        = {{{Investigating Co-Constructive Behavior of Large Language Models in Explanation Dialogues}}},
  year         = {{2025}},
}

@unpublished{59917,
  abstract     = {{Under the slogan of trustworthy AI, much of contemporary AI research is focused on designing AI systems and usage practices that inspire human trust and, thus, enhance adoption of AI systems. However, a person affected by an AI system may not be convinced by AI system design alone---neither should they, if the AI system is embedded in a social context that gives good reason to believe that it is used in tension with a person’s interest. In such cases, distrust in the system may be justified and necessary to build meaningful trust in the first place. We propose the term \emph{healthy distrust} to describe such a justified, careful stance towards certain AI usage practices. We investigate prior notions of trust and distrust in computer science, sociology, history, psychology, and philosophy, outline a remaining gap that healthy distrust might fill and conceptualize healthy distrust as a crucial part for AI usage that respects human autonomy.}},
  author       = {{Paaßen, Benjamin and Alpsancar, Suzana and Matzner, Tobias and Scharlau, Ingrid}},
  booktitle    = {{arXiv}},
  title        = {{{Healthy Distrust in AI systems}}},
  year         = {{2025}},
}

@article{61156,
  abstract     = {{Explainability has become an important topic in computer science and artificial intelligence, leading to a subfield called Explainable Artificial Intelligence (XAI). The goal of providing or seeking explanations is to achieve (better) ‘understanding’ on the part of the explainee. However, what it means to ‘understand’ is still not clearly defined, and the concept itself is rarely the subject of scientific investigation. This conceptual article aims to present a model of forms of understanding for XAI-explanations and beyond. From an interdisciplinary perspective bringing together computer science, linguistics, sociology, philosophy and psychology, a definition of understanding and its forms, assessment, and dynamics during the process of giving everyday explanations are explored. Two types of understanding are considered as possible outcomes of explanations, namely enabledness, ‘knowing how’ to do or decide something, and comprehension, ‘knowing that’ – both in different degrees (from shallow to deep). Explanations regularly start with shallow understanding in a specific domain and can lead to deep comprehension and enabledness of the explanandum, which we see as a prerequisite for human users to gain agency. In this process, the increase of comprehension and enabledness are highly interdependent. Against the background of this systematization, special challenges of understanding in XAI are discussed.}},
  author       = {{Buschmeier, Hendrik and Buhl, Heike M. and Kern, Friederike and Grimminger, Angela and Beierling, Helen and Fisher, Josephine Beryl and Groß, André and Horwath, Ilona and Klowait, Nils and Lazarov, Stefan Teodorov and Lenke, Michael and Lohmer, Vivien and Rohlfing, Katharina and Scharlau, Ingrid and Singh, Amit and Terfloth, Lutz and Vollmer, Anna-Lisa and Wang, Yu and Wilmes, Annedore and Wrede, Britta}},
  journal      = {{Cognitive Systems Research}},
  keywords     = {{understanding, explaining, explanations, explainable, AI, interdisciplinarity, comprehension, enabledness, agency}},
  title        = {{{Forms of Understanding for XAI-Explanations}}},
  doi          = {{10.1016/j.cogsys.2025.101419}},
  volume       = {{94}},
  year         = {{2025}},
}

@article{51345,
  abstract     = {{The algorithmic imaginary as a theoretical concept has received increasing attention in recent years as it aims at users’ appropriation of algorithmic processes operating in opacity. But the concept originally only starts from the users’ point of view, while the processes on the platforms’ side are largely left out. In contrast, this paper argues that what is true for users is also valid for algorithmic processes and the designers behind them. On the one hand, the algorithm imagines users’ future behavior via machine learning, which is supposed to predict all their future actions. On the other hand, the designers anticipate different actions that could potentially be performed by users with every new implementation of features such as social media feeds. In order to bring into view this permanently reciprocal interplay coupled to the imaginary, in which not only the users are involved, I will argue for a more comprehensive and theoretically precise algorithmic imaginary referring to the theory of Cornelius Castoriadis. In such a perspective, an important contribution can be formulated for a theory of social media platforms that goes beyond praxeocentrism or structural determinism.}},
  author       = {{Schulz, Christian}},
  issn         = {{0163-4437}},
  journal      = {{Media, Culture & Society}},
  keywords     = {{Sociology and Political Science, Communication}},
  number       = {{3}},
  pages        = {{646--655}},
  publisher    = {{SAGE Publications}},
  title        = {{{A new algorithmic imaginary}}},
  doi          = {{10.1177/01634437221136014}},
  volume       = {{45}},
  year         = {{2023}},
}

@inproceedings{51766,
  author       = {{Schulz, Christian and Wilmes, Annedore}},
  title        = {{{Vernacular Metaphors of AI}}},
  year         = {{2023}},
}

@inproceedings{51752,
  author       = {{Finke, Josefine and Horwath, Ilona and Matzner, Tobias and Schulz, Christian}},
  booktitle    = {{Artificial Intelligence in HCI}},
  pages        = {{149--160}},
  publisher    = {{Springer International Publishing}},
  title        = {{{(De)Coding social practice in the field of XAI: Towards a co-constructive framework of explanations and understanding between lay users and algorithmic systems}}},
  doi          = {{10.1007/978-3-031-05643-7_10}},
  year         = {{2022}},
}
