@techreport{65180,
  author       = {{Terfloth, Lutz and Buhl, Heike M. and Lohmer, Vivien and Schaffer, Michael and Kern, Friederike and Schulte, Carsten}},
  title        = {{{Bridging the Dual Nature: How Integrated Explanations Enhance Understanding of Technical Artifacts}}},
  year         = {{2026}},
}

@article{60357,
  abstract     = {{Transcripts play a crucial role in qualitative research in computing education, with significant implications for the credibility and reproducibility of findings. However, unreflective and inconsistent transcription standards may unintentionally introduce biases, potentially undermining the validity of research outcomes and the collective progress of the field. In this article, we introduce transcription as a theoretically guided process rather than a mere preparatory step, illustrating its role using a case example. Additionally, through a systematic review of 107 qualitative research articles in computing education, we identify widespread shortcomings in the reporting and implementation of transcription practices, revealing a need for greater intentionality and transparency. To address these challenges, we propose a three-step framework for selecting, applying, and documenting transcription standards that align with the specific context and goals of a study. Rather than advocating for overly complex, one-size-fits-all transcription strategies, we emphasize the importance of a context-appropriate approach that is clearly communicated to foster trust and reproducibility. By advancing a more robust transcription culture, this work aims to support computing education researchers in adopting standards that enhance the quality and reliability of qualitative research in the field.}},
  author       = {{Terfloth, Lutz and Lohmer, Vivien and Kern, Friederike and Schulte, Carsten}},
  issn         = {{1648-5831}},
  journal      = {{Informatics in Education}},
  publisher    = {{Vilnius University Press}},
  title        = {{{Transcription in Computing Education Research: A Review and Recommendations}}},
  doi          = {{10.15388/infedu.2025.09}},
  year         = {{2025}},
}

@unpublished{60718,
  abstract     = {{The ability to generate explanations that are understood by explainees is the
quintessence of explainable artificial intelligence. Since understanding
depends on the explainee's background and needs, recent research focused on
co-constructive explanation dialogues, where an explainer continuously monitors
the explainee's understanding and adapts their explanations dynamically. We
investigate the ability of large language models (LLMs) to engage as explainers
in co-constructive explanation dialogues. In particular, we present a user
study in which explainees interact with an LLM in two settings, one of which
involves the LLM being instructed to explain a topic co-constructively. We
evaluate the explainees' understanding before and after the dialogue, as well
as their perception of the LLMs' co-constructive behavior. Our results suggest
that LLMs show some co-constructive behaviors, such as asking verification
questions, that foster the explainees' engagement and can improve understanding
of a topic. However, their ability to effectively monitor the current
understanding and scaffold the explanations accordingly remains limited.}},
  author       = {{Fichtel, Leandra and Spliethöver, Maximilian and Hüllermeier, Eyke and Jimenez, Patricia and Klowait, Nils and Kopp, Stefan and Ngonga Ngomo, Axel-Cyrille and Robrecht, Amelie and Scharlau, Ingrid and Terfloth, Lutz and Vollmer, Anna-Lisa and Wachsmuth, Henning}},
  note         = {{arXiv:2504.18483}},
  pages        = {{20}},
  title        = {{{Investigating Co-Constructive Behavior of Large Language Models in Explanation Dialogues}}},
  year         = {{2025}},
}

@inproceedings{61234,
  abstract     = {{The ability to generate explanations that are understood by explainees is the
quintessence of explainable artificial intelligence. Since understanding
depends on the explainee's background and needs, recent research focused on
co-constructive explanation dialogues, where an explainer continuously monitors
the explainee's understanding and adapts their explanations dynamically. We
investigate the ability of large language models (LLMs) to engage as explainers
in co-constructive explanation dialogues. In particular, we present a user
study in which explainees interact with an LLM in two settings, one of which
involves the LLM being instructed to explain a topic co-constructively. We
evaluate the explainees' understanding before and after the dialogue, as well
as their perception of the LLMs' co-constructive behavior. Our results suggest
that LLMs show some co-constructive behaviors, such as asking verification
questions, that foster the explainees' engagement and can improve understanding
of a topic. However, their ability to effectively monitor the current
understanding and scaffold the explanations accordingly remains limited.}},
  author       = {{Fichtel, Leandra and Spliethöver, Maximilian and Hüllermeier, Eyke and Jimenez, Patricia and Klowait, Nils and Kopp, Stefan and Ngonga Ngomo, Axel-Cyrille and Robrecht, Amelie and Scharlau, Ingrid and Terfloth, Lutz and Vollmer, Anna-Lisa and Wachsmuth, Henning}},
  booktitle    = {{Proceedings of the 26th Annual Meeting of the Special Interest Group on Discourse and Dialogue}},
  publisher    = {{Association for Computational Linguistics}},
  title        = {{{Investigating Co-Constructive Behavior of Large Language Models in Explanation Dialogues}}},
  year         = {{2025}},
}

@inproceedings{61243,
  author       = {{Fisher, Josephine Beryl and Terfloth, Lutz}},
  booktitle    = {{Proceedings of the 29th Workshop on the Semantics and Pragmatics of Dialogue (SemDial 2025)}},
  title        = {{{The Dual Nature as a Local Context to Explore Verbal Behaviour in Game Explanations}}},
  year         = {{2025}},
}

@article{61156,
  abstract     = {{Explainability has become an important topic in computer science and artificial intelligence, leading to a subfield called Explainable Artificial Intelligence (XAI). The goal of providing or seeking explanations is to achieve (better) ‘understanding’ on the part of the explainee. However, what it means to ‘understand’ is still not clearly defined, and the concept itself is rarely the subject of scientific investigation. This conceptual article aims to present a model of forms of understanding for XAI-explanations and beyond. From an interdisciplinary perspective bringing together computer science, linguistics, sociology, philosophy and psychology, a definition of understanding and its forms, assessment, and dynamics during the process of giving everyday explanations are explored. Two types of understanding are considered as possible outcomes of explanations, namely enabledness, ‘knowing how’ to do or decide something, and comprehension, ‘knowing that’ – both in different degrees (from shallow to deep). Explanations regularly start with shallow understanding in a specific domain and can lead to deep comprehension and enabledness of the explanandum, which we see as a prerequisite for human users to gain agency. In this process, the increase of comprehension and enabledness are highly interdependent. Against the background of this systematization, special challenges of understanding in XAI are discussed.}},
  author       = {{Buschmeier, Hendrik and Buhl, Heike M. and Kern, Friederike and Grimminger, Angela and Beierling, Helen and Fisher, Josephine Beryl and Groß, André and Horwath, Ilona and Klowait, Nils and Lazarov, Stefan Teodorov and Lenke, Michael and Lohmer, Vivien and Rohlfing, Katharina and Scharlau, Ingrid and Singh, Amit and Terfloth, Lutz and Vollmer, Anna-Lisa and Wang, Yu and Wilmes, Annedore and Wrede, Britta}},
  journal      = {{Cognitive Systems Research}},
  keywords     = {{understanding, explaining, explanations, explainable, AI, interdisciplinarity, comprehension, enabledness, agency}},
  title        = {{{Forms of Understanding for XAI-Explanations}}},
  doi          = {{10.1016/j.cogsys.2025.101419}},
  volume       = {{94}},
  year         = {{2025}},
}

@inproceedings{57356,
  author       = {{Schaffer, Michael Erol and Terfloth, Lutz and Schulte, Carsten and Buhl, Heike M.}},
  location     = {{Valletta, Malta}},
  title        = {{{Perception and Consideration of the Explainees’ Needs for Satisfying Explanations}}},
  year         = {{2024}},
}

@inproceedings{57357,
  author       = {{Schaffer, Michael Erol and Terfloth, Lutz and Schulte, Carsten and Buhl, Heike M.}},
  booktitle    = {{Joint Proceedings of the xAI-2024 Late-breaking Work, Demos and Doctoral Consortium}},
  volume       = {{3793}},
  title        = {{{Explainers’ Mental Representations of Explainees’ Needs in Everyday Explanations}}},
  year         = {{2024}},
}

@inproceedings{47448,
  abstract     = {{In XAI it is important to consider that, in contrast to explanations for professional audiences, one cannot assume common expertise when explaining for laypeople. But such explanations between humans vary greatly, making it difficult to research commonalities across explanations. We used the dual nature theory, a techno-philosophical approach, to cope with these challenges. According to it, one can explain, for example, an XAI's decision by addressing its dual nature: by focusing on the Architecture (e.g., the logic of its algorithms) or the Relevance (e.g., the severity of a decision, the implications of a recommendation). We investigated 20 game explanations using the theory as an analytical framework. We elaborate on how we used the theory to quickly structure and compare explanations of technological artifacts. We supplemented the results from analyzing the explanation contents with results from a video recall to explore how explainers justified their explanations. We found that explainers focused on the physical aspects of the game first (Architecture) and only later on aspects of the Relevance. Reasoning in the video recalls indicated that the explainers regarded the initial focus on the Architecture as important for structuring the explanation, explaining the basic components before turning to more complex, intangible aspects. Shifting between the two sides was justified by explanation goals, emerging misunderstandings, and the knowledge needs of the explainee. We discovered several commonalities that inspire future research questions and which, if they generalize further, provide first ideas for the construction of synthetic explanations.}},
  author       = {{Terfloth, Lutz and Schaffer, Michael and Buhl, Heike M. and Schulte, Carsten}},
  booktitle    = {{Explainable Artificial Intelligence (xAI 2023)}},
  isbn         = {{978-3-031-44069-4}},
  location     = {{Lisboa}},
  publisher    = {{Springer, Cham}},
  title        = {{{Adding Why to What? Analyses of an Everyday Explanation}}},
  doi          = {{10.1007/978-3-031-44070-0_13}},
  year         = {{2023}},
}

@inproceedings{20452,
  abstract     = {{In this paper, we present a novel approach to designing teaching interventions for computing education, elaborated using an example from cybersecurity education. Cybersecurity education, like other computing education domains, often focuses on one aspect and separates itself from the other approach. In other words, such approaches address one of two different aspects: a) teaching how to use technology and how to behave, or b) teaching how the technology works. Here we suggest another point of focal awareness for teaching – interaction – that allows the recombination of both approaches in a novel way, leading to a reconstruction of the teaching and learning content in a way that – as we hope – supports understanding at a higher level and thus offers the chance to better develop agency. For this didactic reconstruction of teaching content, we use an approach called the hybrid interaction system framework.
In cybersecurity training, teaching interventions are often successful in some respects but do not seem to lead to long-lasting changes toward secure behavior. Using password security as a simple example, we show how this new approach recombines the two previously mentioned teaching approaches in a novel way.
Within this short paper, we present our current research progress and discuss the potential and value of the approach, both in general and by way of example. Our intention with this submission and early disclosure is to spark discussion and generate further insights, especially regarding the following question: What implications does the hybrid interaction system approach have for learning scenarios?}},
  author       = {{Terfloth, Lutz and Budde, Lea and Schulte, Carsten}},
  booktitle    = {{Koli Calling '20: Proceedings of the 20th Koli Calling International Conference on Computing Education Research}},
  isbn         = {{9781450389211}},
  publisher    = {{Association for Computing Machinery}},
  title        = {{{Combining Ideas and Artifacts: An Interaction-Focused View on Computing Education Using a Cybersecurity Example}}},
  doi          = {{10.1145/3428029.3428052}},
  year         = {{2020}},
}

