@unpublished{60718,
  abstract     = {{The ability to generate explanations that are understood by explainees is the
quintessence of explainable artificial intelligence. Since understanding
depends on the explainee's background and needs, recent research focused on
co-constructive explanation dialogues, where an explainer continuously monitors
the explainee's understanding and adapts their explanations dynamically. We
investigate the ability of large language models (LLMs) to engage as explainers
in co-constructive explanation dialogues. In particular, we present a user
study in which explainees interact with an LLM in two settings, one of which
involves the LLM being instructed to explain a topic co-constructively. We
evaluate the explainees' understanding before and after the dialogue, as well
as their perception of the LLMs' co-constructive behavior. Our results suggest
that LLMs show some co-constructive behaviors, such as asking verification
questions, that foster the explainees' engagement and can improve understanding
of a topic. However, their ability to effectively monitor the current
understanding and scaffold the explanations accordingly remains limited.}},
  author       = {{Fichtel, Leandra and Spliethöver, Maximilian and Hüllermeier, Eyke and Jimenez, Patricia and Klowait, Nils and Kopp, Stefan and Ngonga Ngomo, Axel-Cyrille and Robrecht, Amelie and Scharlau, Ingrid and Terfloth, Lutz and Vollmer, Anna-Lisa and Wachsmuth, Henning}},
  note         = {{arXiv:2504.18483}},
  pages        = {{20}},
  title        = {{{Investigating Co-Constructive Behavior of Large Language Models in Explanation Dialogues}}},
  year         = {{2025}},
}

@inbook{61222,
  author       = {{Lenke, Michael and Klowait, Nils and Biere, Lea and Schulte, Carsten}},
  booktitle    = {{Lecture Notes in Computer Science}},
  isbn         = {{9783032012210}},
  issn         = {{0302-9743}},
  publisher    = {{Springer Nature Switzerland}},
  title        = {{{Assessing AI Literacy: A Systematic Review of Questionnaires with Emphasis on Affective, Behavioral, Cognitive, and Ethical Aspects}}},
  doi          = {{10.1007/978-3-032-01222-7_8}},
  year         = {{2025}},
}

@article{61223,
  abstract     = {{Contemporary debates about artificial intelligence (AI) still treat automation as a straightforward substitution of human labor by machines. Drawing on Goffman’s dramaturgical sociology, this paper reframes AI in the workplace as \emph{supplementary} rather than \emph{substitutive} automation. We argue that the central—but routinely overlooked—terrain of struggle is symbolic-interactional: workers continuously stage, conceal, and re-negotiate what counts as “real” work and professional competence. Large language models (LLMs) such as ChatGPT exemplify this dynamic. They quietly take over the invisible, routinised tasks that underpin cognitive occupations (editing, summarizing, first-draft production) while leaving humans to enact the highly visible or relational facets that sustain occupational prestige. Drawing on diverse sources to illustrate our theoretical argument, we show how individual workers, dramaturgical teams, and entire professional fields manage impressions of expertise in order to counter status threats, renegotiate fees, or obscure the extent of AI assistance. The paper itself, having been intentionally written with the ‘aid’ of all presently available frontier AI models, serves as a meta-reflexive performance of professional self-staging. The dramaturgical framework clarifies why utopian tales of friction-free augmentation and dystopian narratives of total displacement both misread how automation is actually unfolding. By foregrounding visibility, obfuscation, and impression management, the article presents a differentiated case for AI’s impact on the performative structure of work, outlines diagnostic tools for assessing real-world AI exposure beyond hype-driven headlines, and argues for a more human-centered basis for evaluating policy responses to the ‘fourth industrial revolution.’ In short, AI enters the labor process not as an autonomous actor, but as a prop within an ongoing social performance—one whose scripts, stages, and audiences remain irreducibly human.}},
  author       = {{Klowait, Nils and Erofeeva, Maria}},
  issn         = {{2297-7775}},
  journal      = {{Frontiers in Sociology}},
  publisher    = {{Frontiers Media SA}},
  title        = {{{The presentation of self in the age of ChatGPT}}},
  doi          = {{10.3389/fsoc.2025.1614473}},
  volume       = {{10}},
  year         = {{2025}},
}

@inproceedings{61234,
  abstract     = {{The ability to generate explanations that are understood by explainees is the
quintessence of explainable artificial intelligence. Since understanding
depends on the explainee's background and needs, recent research focused on
co-constructive explanation dialogues, where an explainer continuously monitors
the explainee's understanding and adapts their explanations dynamically. We
investigate the ability of large language models (LLMs) to engage as explainers
in co-constructive explanation dialogues. In particular, we present a user
study in which explainees interact with an LLM in two settings, one of which
involves the LLM being instructed to explain a topic co-constructively. We
evaluate the explainees' understanding before and after the dialogue, as well
as their perception of the LLMs' co-constructive behavior. Our results suggest
that LLMs show some co-constructive behaviors, such as asking verification
questions, that foster the explainees' engagement and can improve understanding
of a topic. However, their ability to effectively monitor the current
understanding and scaffold the explanations accordingly remains limited.}},
  author       = {{Fichtel, Leandra and Spliethöver, Maximilian and Hüllermeier, Eyke and Jimenez, Patricia and Klowait, Nils and Kopp, Stefan and Ngonga Ngomo, Axel-Cyrille and Robrecht, Amelie and Scharlau, Ingrid and Terfloth, Lutz and Vollmer, Anna-Lisa and Wachsmuth, Henning}},
  booktitle    = {{Proceedings of the 26th Annual Meeting of the Special Interest Group on Discourse and Dialogue}},
  publisher    = {{Association for Computational Linguistics}},
  title        = {{{Investigating Co-Constructive Behavior of Large Language Models in Explanation Dialogues}}},
  year         = {{2025}},
}

@inproceedings{61225,
  author       = {{Lenke, Michael and Lehner, Lukas and Landman, Martina}},
  booktitle    = {{2025 IEEE Global Engineering Education Conference (EDUCON)}},
  publisher    = {{IEEE}},
  title        = {{{“I'm Actually More Interested in AI Than in Computer Science” - 12-Year-Olds Describing Their First Encounter with AI}}},
  doi          = {{10.1109/educon62633.2025.11016657}},
  year         = {{2025}},
}

@inproceedings{61224,
  author       = {{Lenke, Michael and Schulte, Carsten}},
  booktitle    = {{2025 IEEE Global Engineering Education Conference (EDUCON)}},
  publisher    = {{IEEE}},
  title        = {{{Enhancing AI Interaction through Co-Construction: A Multi-Faceted Workshop Framework}}},
  doi          = {{10.1109/educon62633.2025.11016326}},
  year         = {{2025}},
}

@article{61156,
  abstract     = {{Explainability has become an important topic in computer science and artificial intelligence, leading to a subfield called Explainable Artificial Intelligence (XAI). The goal of providing or seeking explanations is to achieve (better) ‘understanding’ on the part of the explainee. However, what it means to ‘understand’ is still not clearly defined, and the concept itself is rarely the subject of scientific investigation. This conceptual article aims to present a model of forms of understanding for XAI-explanations and beyond. From an interdisciplinary perspective bringing together computer science, linguistics, sociology, philosophy and psychology, a definition of understanding and its forms, assessment, and dynamics during the process of giving everyday explanations are explored. Two types of understanding are considered as possible outcomes of explanations, namely enabledness, ‘knowing how’ to do or decide something, and comprehension, ‘knowing that’ – both in different degrees (from shallow to deep). Explanations regularly start with shallow understanding in a specific domain and can lead to deep comprehension and enabledness of the explanandum, which we see as a prerequisite for human users to gain agency. In this process, the increase of comprehension and enabledness are highly interdependent. Against the background of this systematization, special challenges of understanding in XAI are discussed.}},
  author       = {{Buschmeier, Hendrik and Buhl, Heike M. and Kern, Friederike and Grimminger, Angela and Beierling, Helen and Fisher, Josephine Beryl and Groß, André and Horwath, Ilona and Klowait, Nils and Lazarov, Stefan Teodorov and Lenke, Michael and Lohmer, Vivien and Rohlfing, Katharina and Scharlau, Ingrid and Singh, Amit and Terfloth, Lutz and Vollmer, Anna-Lisa and Wang, Yu and Wilmes, Annedore and Wrede, Britta}},
  journal      = {{Cognitive Systems Research}},
  keywords     = {{understanding, explaining, explanations, explainable, AI, interdisciplinarity, comprehension, enabledness, agency}},
  title        = {{{Forms of Understanding for XAI-Explanations}}},
  doi          = {{10.1016/j.cogsys.2025.101419}},
  volume       = {{94}},
  year         = {{2025}},
}

@article{56190,
  abstract     = {{This study investigates the potential of using advanced conversational artificial intelligence (AI) to help people understand complex AI systems. In line with conversation-analytic research, we view the participatory role of AI as dynamically unfolding in a situation rather than being predetermined by its architecture. To study user sensemaking of intransparent AI systems, we set up a naturalistic encounter between human participants and two AI systems developed in-house: a reinforcement learning simulation and a GPT-4-based explainer chatbot. Our results reveal that an explainer-AI only truly functions as such when participants actively engage with it as a co-constructive agent. Both the interface’s spatial configuration and the asynchronous temporal nature of the explainer AI – combined with the users’ presuppositions about its role – contribute to the decision whether to treat the AI as a dialogical co-participant in the interaction. Participants establish evidentiality conventions and sensemaking procedures that may diverge from a system’s intended design or function.}},
  author       = {{Klowait, Nils and Erofeeva, Maria and Lenke, Michael and Horwath, Ilona and Buschmeier, Hendrik}},
  journal      = {{Discourse \& Communication}},
  number       = {{6}},
  pages        = {{917--930}},
  publisher    = {{Sage}},
  title        = {{{Can AI explain AI? Interactive co-construction of explanations among human and artificial agents}}},
  doi          = {{10.1177/17504813241267069}},
  volume       = {{18}},
  year         = {{2024}},
}

@article{43437,
  abstract     = {{In virtual reality (VR), participants may not always have hands, bodies, eyes, or even voices—using VR helmets and two controllers, participants control an avatar through virtual worlds that do not necessarily obey familiar laws of physics; moreover, the avatar’s bodily characteristics may not neatly match our bodies in the physical world. Despite these limitations and specificities, humans get things done through collaboration and the creative use of the environment. While multiuser interactive VR is attracting greater numbers of participants, there are currently few attempts to analyze the in situ interaction systematically. This paper proposes a video-analytic detail-oriented methodological framework for studying virtual reality interaction. Using multimodal conversation analysis, the paper investigates a nonverbal, embodied, two-person interaction: two players in a survival game strive to gesturally resolve a misunderstanding regarding an in-game mechanic—however, both of their microphones are turned off for the duration of play. The players’ inability to resort to complex language to resolve this issue results in a dense sequence of back-and-forth activity involving gestures, object manipulation, gaze, and body work. Most crucially, timing and modified repetitions of previously produced actions turn out to be the key to overcome both technical and communicative challenges. The paper analyzes these action sequences, demonstrates how they generate intended outcomes, and proposes a vocabulary to speak about these types of interaction more generally. The findings demonstrate the viability of multimodal analysis of VR interaction, shed light on unique challenges of analyzing interaction in virtual reality, and generate broader methodological insights about the study of nonverbal action.}},
  author       = {{Klowait, Nils}},
  issn         = {{2578-1863}},
  journal      = {{Human Behavior and Emerging Technologies}},
  keywords     = {{Human-Computer Interaction, General Social Sciences, Social Psychology, Virtual Reality, Multimodality, Nonverbal Interaction, Search Sequence, Gesture, Co-Operative Action, Goodwin, Ethnomethodology}},
  pages        = {{1--15}},
  publisher    = {{Hindawi Limited}},
  title        = {{{On the Multimodal Resolution of a Search Sequence in Virtual Reality}}},
  doi          = {{10.1155/2023/8417012}},
  volume       = {{2023}},
  year         = {{2023}},
}

@article{45599,
  abstract     = {{We investigate how people with atypical bodily capabilities interact within virtual reality (VR) and the way they overcome interactional challenges in these emerging social environments. Based on a videographic multimodal single case analysis, we demonstrate how non-speaking VR participants furnish their bodies, at-hand instruments, and their interactive environment for their practical purposes. Our findings are subsequently related to renewed discussions of the relationship between agency and environment, and the co-constructed nature of situated action. We thus aim to contribute to the growing vocabulary of atypical interaction analysis and the broader context of ethnomethodological conceptualizations of unorthodox and fractured interactional ecologies.}},
  author       = {{Klowait, Nils and Erofeeva, Maria}},
  issn         = {{2446-3620}},
  journal      = {{Social Interaction. Video-Based Studies of Human Sociality}},
  keywords     = {{General Medicine}},
  number       = {{1}},
  publisher    = {{Det Kgl. Bibliotek/Royal Danish Library}},
  title        = {{{Halting the Decay of Talk}}},
  doi          = {{10.7146/si.v6i1.136903}},
  volume       = {{6}},
  year         = {{2023}},
}

