@inproceedings{64872,
  author       = {{Buhl, Heike M. and Fisher, Josephine Beryl and Rohlfing, Katharina J.}},
  booktitle    = {{Proceedings of the 3rd TRR 318 Conference: Contextualizing Explanations}},
  editor       = {{Cimiano, Philipp and Paassen, Benjamin and Vollmer, Anna-Lisa}},
  publisher    = {{Bielefeld University Press}},
  title        = {{{Cognitive and Interactive Adaptivity to the Explainee in an Explanatory Dialogue: An Experimental Study}}},
  doi          = {{10.64136/gumb4700}},
  year         = {{2026}},
}

@inbook{65061,
  abstract     = {{<jats:title>Abstract</jats:title>
                  <jats:p>
                    One of the purposes for which XAI is often brought into play is to enable a user to act responsibly. However, responsibility is a complex normative and social phenomenon that we unfold in this chapter. We consider that the classical concepts of agency and responsibility do not fully capture what is needed for meaningful collaboration between human users and XAI. Advocating the perspective of sXAI, we argue that the growing adaptivity of AI systems will result in sXAI being considered as partners. Both partners adopt particular (dialogical) roles within a collaborative process and take responsibility for them. We expect that these roles lead to reactive attitudes toward the sXAI on the side of the human partners that make these roles relational. They resemble those reactive attitudes that we hold toward other human agents. For agents to exercise their responsibility, they need to possess agential capacities to fulfill their role with respect to the structure of a social interaction. Hence, sXAI can be expected to act responsibly. But because of XAI’s limited normative capacities, it might rather act as a marginal agent. We refer to marginal agents and show they can be scaffolded with regard to their agential capacities and their knowledge about the structure of a social interaction. The structure links the actions of the partners to each other in terms of a set of stimuli and responses to it in pursuit of a particular goal. Hence, it is important to differentiate between the different goals that a structure can impose for exercising responsibility. Therefore, we follow (Responsibility from the margins. Oxford University Press; 2015.
                    <jats:ext-link xmlns:xlink="http://www.w3.org/1999/xlink" xlink:href="https://doi.org/10.1093/acprof:oso/9780198715672.24001.0001" ext-link-type="uri">https://doi.org/10.1093/acprof:oso/9780198715672.24001.0001</jats:ext-link>
                    ) and offer three structures that can help to organize responsibility for
                    <jats:italic>decisions made</jats:italic>
                    with the assistance of AI systems. These structures are attributability, answerability, and accountability. Our insights will inform the development and design process of XAI to meet the guiding principles of responsible research and innovation as well as trustworthy AI.
                  </jats:p>}},
  author       = {{Rohlfing, Katharina J. and Alpsancar, Suzana and Schulte, Carsten}},
  booktitle    = {{Social Explainable AI}},
  isbn         = {{9789819652891}},
  pages        = {{157--177}},
  publisher    = {{Springer Nature Singapore}},
  title        = {{{Responsibilities in sXAI}}},
  doi          = {{10.1007/978-981-96-5290-7_9}},
  year         = {{2026}},
}

@book{65065,
  abstract     = {{<jats:title>Abstract</jats:title>
                  <jats:p>This introduction sets the stage for the present book. Whereas research in eXplainable AI (XAI) is motivated by societal changes and values, technology development largely ignores social aspects. This book aims to address this research gap with a systematic and comprehensive social view on explainable AI. Besides introducing many relevant concepts, the book offers first access to their possible implementation, thus advancing the development of more social XAI. The introduction starts by connecting the topic to the general research field of XAI. The second part defines the novel approach of social eXplainable AI (sXAI) along the three characteristics of social interaction such as patternedness, incrementality, and multimodality. Finally, the third part explains the structure followed by each chapter. The book offers insights not only for readers who work on technology development but also for those working in sociotechnical fields. Addressing an interdisciplinary readership, the book is an invitation for more exchange and further development of the sXAI field.</jats:p>}},
  editor       = {{Rohlfing, Katharina J. and Främling, Kary and Lim, Brian and Alpsancar, Suzana and Thommes, Kirsten}},
  isbn         = {{9789819652891}},
  publisher    = {{Springer Nature Singapore}},
  title        = {{{Social Explainable AI}}},
  doi          = {{10.1007/978-981-96-5290-7_1}},
  year         = {{2026}},
}

@inbook{61323,
  author       = {{Wrede, Britta and Buschmeier, Hendrik and Rohlfing, Katharina Justine and Booshehri, Meisam and Grimminger, Angela}},
  booktitle    = {{Social Explainable AI}},
  editor       = {{Rohlfing, Katharina J. and Främling, Kary and Lim, Brian and Alpsancar, Suzana and Thommes, Kirsten}},
  pages        = {{227--245}},
  publisher    = {{Springer Nature Singapore}},
  title        = {{{Incremental communication}}},
  doi          = {{10.1007/978-981-96-5290-7_12}},
  year         = {{2026}},
}

@inbook{61112,
  author       = {{Rohlfing, Katharina J. and Vollmer, Anna-Lisa and Grimminger, Angela}},
  booktitle    = {{Social Explainable AI}},
  editor       = {{Rohlfing, Katharina J. and Främling, Kary and Lim, Brian and Alpsancar, Suzana and Thommes, Kirsten}},
  publisher    = {{Springer Nature Singapore}},
  title        = {{{Practices: How to establish an explaining practice}}},
  doi          = {{10.1007/978-981-96-5290-7_5}},
  year         = {{2026}},
}

@unpublished{59839,
  abstract     = {{In many scientific approaches, especially in those that try to foster explainability of Artificial Intelligences, a narrow conception of explaining prevails. This narrow conception implies that explaining is a one-directional action in which knowledge is transferred from the explainer to an addressee. By studying the amount of agency in metaphors for explaining in scientific texts, we want to find out – or at least to contribute a partial answer to the question – why this narrow conception is so dominant. For our analysis, we use a linguistic conception of agency, transitivity. This concept allows to specify the degree of agency or effectiveness of the action in a verbalised event. It is defined by several component parts. We detail and discuss both the parameters of and global transitivity. Overall, transitivity of explaining metaphors has a rather common pattern across metaphors. Agency is not high and reduced in characteristic aspects: The metaphors imply that the object of explaining is static, i.e., is not changed within the explanation, and that explaining is the activity of one person only. This pattern may account for the narrow conception of explaining. It contrasts strongly with current co-constructive or sociotechnical approaches to explainability.}},
  author       = {{Scharlau, Ingrid and Rohlfing, Katharina J.}},
  publisher    = {{Center for Open Science}},
  title        = {{{Agency in metaphors of explaining: An analysis of scientific texts}}},
  note         = {{Preprint}},
  year         = {{2025}},
}

@article{60935,
  abstract     = {{Research suggests that presenting an action via multimodal stimulation (verbal and visual) enhances its perception. To highlight this, in most studies, assertive instructions are generally presented before the occurrence of the visual subevent(s). However, verbal instructions need not always be assertive; they can also include negation to contrast the present event with a prior one, thereby facilitating processing—a phenomenon known as contextual facilitation. In our study, we investigated whether using negation to guide an action sequence facilitates action perception, particularly when two consecutive subactions contrast with each other. Stimuli from previous studies on action demonstration were used to create (non)contrastive actions, that is, a ball following noncontrastive and identical (Over–Over or Under–Under) versus contrastive and opposite paths (Over–Under or Under–Over) before terminating at a goal location. In Experiment 1, either an assertive or a negative instruction was provided as verbal guidance before onset of each path. Analyzing data from 35 participants, we found that, whereas assertive instructions facilitate overall action recall, negating the later path for contrastive actions is equally facilitative. Given that action goal is the most salient aspect in event memory due to goal-path bias in attention, a second experiment was conducted to test the effect of multimodal synchrony on goal attention and action memory. Experiment 2 revealed that when instructions overlap with actions, they become more tailored—assertive instructions effectively guide noncontrastive actions, while assertive–negative instruction particularly guides contrastive actions. Both studies suggest that increased attention to the goal leads to coarser perception of midevents, with action-instruction synchrony modulating goal bias in real-time event apprehension to serve distinct purposes for action conceptualization. 
Whereas presenting instructions before subactions attenuates goal attention, overlapping instructions increase goal attention and reveal the selective roles of assertive and negative instructions in guiding contrastive and noncontrastive actions.}},
  author       = {{Singh, Amit and Rohlfing, Katharina J.}},
  journal      = {{Cognitive Science}},
  number       = {{8}},
  publisher    = {{Wiley}},
  title        = {{{Contrastive Verbal Guidance: A Beneficial Context for Attention To Events and Their Memory?}}},
  doi          = {{10.1111/cogs.70096}},
  volume       = {{49}},
  year         = {{2025}},
}

@techreport{61332,
  author       = {{Buhl, Heike M. and Fisher, Josephine Beryl and Rohlfing, Katharina J.}},
  institution  = {{OSF}},
  publisher    = {{OSF}},
  title        = {{{Role Perception Questionnaire: Co-construction. Scales manual}}},
  year         = {{2025}},
}

@inproceedings{61432,
  abstract     = {{This study investigated how action histories – unfolding sequences of actions with objects – provide a context for both attentional allocation and linguistic repair strategies. Building on theories of enactive cognition and sensorimotor contingency theory, we experimentally manipulated action sequences (action history) to create either simple or rich “situational models,” and investigated how these models interact with attention and reflect in linguistic processes during human–robot interaction. Participants (N = 30) engaged in a controlled object placement task with a humanoid robot, where the action (manner) information was either provided or omitted. The omission elicited repair behaviors in participants that were in focus of our investigation. For rich models (competing action possibilities) participants demonstrated: a) increased attentional reorientation, reflecting active engagement with the situational model b) preference for restricted repairs, targeting the specific source of trouble in action selection. Conversely, a simple situational model led to more generalized attention patterns and open repair strategies, suggesting weaker constraints on internal processing. These findings highlight how situational structures emerge externally to scaffold internal cognitive processes, with action histories serving as a crucial context for the interface between perception, action, and language. We discuss how to implement such a tight loop in the assistance of a system.}},
  author       = {{Singh, Amit and Rohlfing, Katharina J.}},
  booktitle    = {{IEEE International Conference on Development and Learning (ICDL)}},
  keywords     = {{Attention, Action, Repairs, Task model, HRI, Eyemovement}},
  location     = {{Prague}},
  title        = {{{Manners Matter: Action history guides attention and repair choices during interaction}}},
  doi          = {{10.31234/osf.io/yn2we_v1}},
  year         = {{2025}},
}

@inproceedings{61401,
  abstract     = {{We introduce a method to study online language processes in human--robot interactive setup. In this interaction, language mediated eye movements can be studied as the dialogue unfolds between human and a robot.  
Traditionally, real-time linguistic processes are studied using visual world paradigms (VWP), where either the comprehension or the production tasks are implemented on screens for controlled investigations. Going beyond these traditional and unidirectional approach, we bring together production--comprehension loop with the help of a humanoid robot to preserve interactivity in an ecologically valid yet controlled setup. We discuss the potential of such setups for designing and evaluating findings from language--vision interplay in psycholinguistics. Our setup shows a potential to depart from traditional screen based experiments, balancing the dynamics of the interaction with control of the human behaviors. }},
  author       = {{Singh, Amit and Rohlfing, Katharina J.}},
  booktitle    = {{6th Biannual Conference of the German Society for Cognitive Science, Bochum, Germany}},
  location     = {{Bochum}},
  title        = {{{Embedding Psycholinguistics: An Interactive Framework for Studying Language in Action}}},
  doi          = {{10.17605/OSF.IO/8PR23}},
  year         = {{2025}},
}

@article{61156,
  abstract     = {{Explainability has become an important topic in computer science and artificial intelligence, leading to a subfield called Explainable Artificial Intelligence (XAI). The goal of providing or seeking explanations is to achieve (better) ‘understanding’ on the part of the explainee. However, what it means to ‘understand’ is still not clearly defined, and the concept itself is rarely the subject of scientific investigation. This conceptual article aims to present a model of forms of understanding for XAI-explanations and beyond. From an interdisciplinary perspective bringing together computer science, linguistics, sociology, philosophy and psychology, a definition of understanding and its forms, assessment, and dynamics during the process of giving everyday explanations are explored. Two types of understanding are considered as possible outcomes of explanations, namely enabledness, ‘knowing how’ to do or decide something, and comprehension, ‘knowing that’ – both in different degrees (from shallow to deep). Explanations regularly start with shallow understanding in a specific domain and can lead to deep comprehension and enabledness of the explanandum, which we see as a prerequisite for human users to gain agency. In this process, the increase of comprehension and enabledness are highly interdependent. Against the background of this systematization, special challenges of understanding in XAI are discussed.}},
  author       = {{Buschmeier, Hendrik and Buhl, Heike M. and Kern, Friederike and Grimminger, Angela and Beierling, Helen and Fisher, Josephine Beryl and Groß, André and Horwath, Ilona and Klowait, Nils and Lazarov, Stefan Teodorov and Lenke, Michael and Lohmer, Vivien and Rohlfing, Katharina and Scharlau, Ingrid and Singh, Amit and Terfloth, Lutz and Vollmer, Anna-Lisa and Wang, Yu and Wilmes, Annedore and Wrede, Britta}},
  journal      = {{Cognitive Systems Research}},
  keywords     = {{understanding, explaining, explanations, explainable, AI, interdisciplinarity, comprehension, enabledness, agency}},
  title        = {{{Forms of Understanding for XAI-Explanations}}},
  doi          = {{10.1016/j.cogsys.2025.101419}},
  volume       = {{94}},
  year         = {{2025}},
}

@book{54411,
  abstract     = {{Die Verankerung von Inklusion als Querschnittsthema lehramtsbezogener Studiengänge fordert zu einer interdisziplinären Verständigung über die Vorstellung inklusiver Unterrichtsgestaltung heraus. Die ForscherInnengruppe der Universität Paderborn wählt hierfür als methodischen Ausgangspunkt Vignetten als verdichtete Fallbeispiele aus der Unterrichtspraxis, die aus verschiedenen fachübergreifenden praxistheoretischen Perspektiven gelesen und so gemeinsam diskutiert werden. In der Linie praxistheoretischer Arbeiten reflektiert das Buch Praktiken im inklusiven Fachunterricht in der Spannung zu den normativen (sonder-)pädagogischen bzw. (fach-)didaktischen Vorannahmen, um aus dieser Reflexion eigenes verantwortetes Handeln zu entwickeln. Die Vignettenbündel laden so zum diskursiven Austausch, zur Kontrastierung oder zum "Einfangen" selbst erlebter Unterrichtspraxen auf dem Weg des Verstehens von Inklusion in Theorie und Praxis ein.}},
  editor       = {{Häsel-Weide, Uta and Kammeyer, Katharina and Kruse, Iris and Laubenstein, Désirée and Reis, Oliver and Rohlfing, Katharina and Schröer, Franz}},
  isbn         = {{978-3-17-043026-6}},
  pages        = {{186}},
  publisher    = {{Kohlhammer}},
  title        = {{{Inklusion verstehen - Szenen aus dem Unterricht in interdisziplinärer Reflexion}}},
  year         = {{2024}},
}

@inbook{56135,
  author       = {{Schröer, Franz and Reis, Oliver and Kruse, Iris and Rohlfing, Katharina}},
  booktitle    = {{Inklusion verstehen. Szenen aus dem Unterricht in interdisziplinärer Reflexion}},
  isbn         = {{9783170430259}},
  pages        = {{181--185}},
  publisher    = {{W. Kohlhammer GmbH}},
  title        = {{{Das Forschungsprojekt im Spiegel praxistheoretischer inklusionsbezogener Unterrichtsforschung}}},
  doi          = {{10.17433/978-3-17-043025-9}},
  year         = {{2024}},
}

@inproceedings{56660,
  abstract     = {{In a successful dialogue in general and a successful explanation in specific, partners need to account for both, the task model (what is relevant for the task) and the partner model (what one can con- tribute). The phenomenon of coupling between task and the partner model becomes especially interesting in the context of Human– Robot Interaction where humans have to deal with unknown ca- pabilities of the robot, which can momentarily be perceived when the robot is unable to contribute to the task. Following research on the path over manner prominence in an action [31–33], a robot ex- plained actions to a human by emphasizing two aspects – the path ("where" component) and the manner ("how" component). On criti- cal trials, the robot occasionally omitted one of these components where participants sought missing information for the path or the manner. Participants’ information-seeking and gaze behaviour were analysed. Analysis confirms the initial predictions for, a) task model (path over manner prominence), i.e., earlier information-seeking for path-missing than manner-missing trials, and b) partner model, i.e., while information-seeking is predominantly tied to the attention on the robot’s face, when robot fails to provide resolution, attention shifts more often towards its torso – a behavior likely to indicate an exploration of the robot’s capabilities. An individual-level anal- ysis further confirms that the intra-individual variation in the task model is partly influenced by the perceived capability of the robot.}},
  author       = {{Singh, Amit and Rohlfing, Katharina J.}},
  booktitle    = {{Proceedings of 26th ACM International Conference on Multimodal Interaction (ICMI 2024)}},
  keywords     = {{Explanation, Scaffolding, Eyetracking, Partner Model, HRI}},
  location     = {{San Jose, Costa Rica}},
  title        = {{{Coupling of Task and Partner Model: Investigating the Intra-Individual Variability in Gaze during Human–Robot Explanatory Dialogue}}},
  doi          = {{10.1145/3686215.3689202}},
  year         = {{2024}},
}

@article{56264,
  author       = {{Buhl, Heike M. and Fisher, Josephine Beryl and Rohlfing, Katharina}},
  journal      = {{Proceedings of the Annual Meeting of the Cognitive Science Society}},
  title        = {{{Changes in partner models – Effects of adaptivity in the course of explanations}}},
  volume       = {{46}},
  year         = {{2024}},
}

@inproceedings{53330,
  author       = {{Tolksdorf, Nils Frederik and Wildt, Eugenia and Rohlfing, Katharina}},
  booktitle    = {{Companion of the 2024 ACM/IEEE International Conference on Human-Robot Interaction}},
  isbn         = {{9798400703232}},
  pages        = {{1053--1057}},
  publisher    = {{ACM}},
  title        = {{{Preschoolers' Interactions with Social Robots: Investigating the Potential for Eliciting Metatalk and Critical Technological Thinking}}},
  doi          = {{10.1145/3610978.3640654}},
  year         = {{2024}},
}

@article{57466,
  abstract     = {{<jats:p>Nowadays, from early on, children are exposed to technology and media, and six-month-olds are even expected to use some. There is a wide range of products for babies and toddlers. This article reviews the current state of the art, examining the robustness of word knowledge learned using technologies such as e-books, computer games, digital pens, and social robots, and how individual differences among children impact language learning with technology. It aligns with interactive learning theories, positing that learners need to engage in an interaction in order to construct new knowledge.</jats:p>}},
  author       = {{Rohlfing, Katharina and Wildt, Eugenia and Tolksdorf, Nils Frederik}},
  issn         = {{2657-9510}},
  journal      = {{Dzieciństwo. Literatura i Kultura}},
  number       = {{1}},
  pages        = {{35--69}},
  publisher    = {{University of Warsaw}},
  title        = {{{Language Learning with Media and Technology in (Early) Childhood}}},
  doi          = {{10.32798/dlk.1376}},
  volume       = {{6}},
  year         = {{2024}},
}

@article{53072,
  abstract     = {{Negated statements require more processing efforts than assertions. However, in certain contexts, repeating negations undergo adaptation, which over time mitigates the effort.
Here, we ask whether negations hamper visual processing and whether consecutive repetitions mitigate its influence. 
We assessed the overall attentional capacity and its distribution, the relative weight, quantitatively using 
the formal Theory of Visual Attention (TVA).
We employed a very simple form for negations, binary negations. Negated instructions, expressing the only alternative to the core supposition, were cognitively demanding, resulting in a loss of attentional capacity in three experiments. The overall attentional capacity recovered gradually but stagnated at a lower level than with assertions, even after many repetitions. Additionally, negations distributed the attention equally between target and reference stimulus. Repetitions slightly increased the reference's share of attention. Assertions, on the other hand, shifted the attentional weight towards the target. Few repetitions slightly decreased the bias towards the target, many repetitions increased the bias.}},
  author       = {{Banh, Ngoc Chi and Tünnermann, Jan and Rohlfing, Katharina J. and Scharlau, Ingrid}},
  journal      = {{Frontiers in Psychology}},
  title        = {{{Benefiting from Binary Negations? Verbal Negations Decrease Visual Attention and Balance Its Distribution}}},
  doi          = {{10.3389/fpsyg.2024.1451309}},
  volume       = {{15}},
  year         = {{2024}},
}

@inproceedings{57609,
  author       = {{Tykhonenko, Valeriia and Tolksdorf, Nils Frederik and Rohlfing, Katharina}},
  booktitle    = {{Proceedings of the Annual Meeting of the Cognitive Science Society, 46}},
  title        = {{{How turn-timing can inform about becoming familiar with a task and its changes: a study of shy and less shy four-year-old children}}},
  year         = {{2024}},
}

@article{58708,
  abstract     = {{Research about explanation processes is gaining relevance because of the increased popularity of artificial systems required to explain their function or outcome. Following an interactive approach, not only explainers, but also explainees contribute to successful interactions. However, little is known about how explainees actively guide explanation processes and how their involvement relates to learning. We explored the occurrence and type of explainees’ questions in 20 adult — adult explanation dialogues about unknown present and absent objects. Crucially, we related the question types to the explainees’ subsequent recall of the unknown object labels. We found that explainees asked different types of questions, especially about the object’s label and facts. Questions about the object’s function were asked more when objects were present. In addition, requests for labelling were linked to better recall. The results contribute to designing explainable AI that aims to provide relevant and adaptive explanations and to further experimental approaches to study explanations.}},
  author       = {{Fisher, Josephine Beryl and Rohlfing, Katharina J. and Donnellan, Ed and Grimminger, Angela and Gu, Yan and Vigliocco, Gabriella}},
  journal      = {{Interaction Studies}},
  number       = {{2}},
  pages        = {{244--255}},
  publisher    = {{John Benjamins}},
  title        = {{{Explain with, rather than explain to: How explainees shape their own learning}}},
  doi          = {{10.1075/is.23019.fis}},
  volume       = {{25}},
  year         = {{2024}},
}

