@inproceedings{61154,
  author       = {{Türk, Olcay and Lazarov, Stefan Teodorov and Buschmeier, Hendrik and Wagner, Petra and Grimminger, Angela}},
  booktitle    = {{LingCologne 2025 – Book of Abstracts}},
  location     = {{Cologne, Germany}},
  pages        = {{36}},
  title        = {{{Acoustic detection of false positive backchannels of understanding in explanations}}},
  year         = {{2025}},
}

@inbook{61150,
  abstract     = {{Since the emergence of the field of eXplainable Artificial Intelligence (XAI), a growing number of researchers have argued that XAI should consider insights from the social sciences in order to adapt explanations to the expectations and needs of human users. This has led to the emergence of a field called Social XAI, which is concerned with understanding how explanations are actively shaped in the interaction between a human user and an AI system. Recognizing this turn in XAI toward making XAI systems more “social” by providing explanations that focus on human information needs and incorporating insights from human–human explanatory interactions, in this paper we provide a formal foundation for Social XAI. We do so by proposing novel ontological accounts of the key terms used in Social XAI based on Basic Formal Ontology (BFO). Specifically, we provide novel ontological accounts for explanandum, explanans, understanding, explanation, explainer, explainee, and context. In doing so, we discuss multifaceted entities in Social XAI (having both continuant and occurrent facets; e.g., explanation) and the relationship between understanding and explanation. Additionally, we propose solutions to seemingly paradoxical views on some terms (e.g., social constructivist vs. individual constructivist perspective on explanandum).}},
  author       = {{Booshehri, Meisam and Buschmeier, Hendrik and Cimiano, Philipp}},
  booktitle    = {{Proceedings of the 15th International Conference on Formal Ontology in Information Systems}},
  isbn         = {{9781643686172}},
  issn         = {{0922-6389}},
  location     = {{Catania, Italy}},
  pages        = {{255–268}},
  publisher    = {{IOS Press}},
  title        = {{{A BFO-based ontological analysis of entities in Social XAI}}},
  doi          = {{10.3233/faia250498}},
  year         = {{2025}},
}

@inproceedings{61229,
  author       = {{Muschalik, Maximilian and Fumagalli, Fabian and Frazzetto, Paolo and Strotherm, Janine and Hermes, Luca and Sperduti, Alessandro and Hüllermeier, Eyke and Hammer, Barbara}},
  booktitle    = {{The Thirteenth International Conference on Learning Representations (ICLR)}},
  title        = {{{Exact Computation of Any-Order Shapley Interactions for Graph Neural Networks}}},
  year         = {{2025}},
}

@inproceedings{61232,
  author       = {{Visser, Roel and Fumagalli, Fabian and Hüllermeier, Eyke and Hammer, Barbara}},
  booktitle    = {{Proceedings of the European Symposium on Artificial Neural Networks (ESANN)}},
  title        = {{{Explaining Outliers using Isolation Forest and Shapley Interactions}}},
  year         = {{2025}},
}

@inproceedings{61231,
  author       = {{Fumagalli, Fabian and Muschalik, Maximilian and Hüllermeier, Eyke and Hammer, Barbara and Herbinger, Julia}},
  booktitle    = {{Proceedings of The 28th International Conference on Artificial Intelligence and Statistics (AISTATS)}},
  pages        = {{5140–5148}},
  publisher    = {{PMLR}},
  title        = {{{Unifying Feature-Based Explanations with Functional ANOVA and Cooperative Game Theory}}},
  volume       = {{258}},
  year         = {{2025}},
}

@inproceedings{61153,
  author       = {{Booshehri, Meisam and Buschmeier, Hendrik and Cimiano, Philipp}},
  booktitle    = {{Abstracts of the 3rd TRR 318 Conference: Contextualizing Explanations}},
  location     = {{Bielefeld, Germany}},
  title        = {{{A BFO-based ontology of context for Social XAI}}},
  year         = {{2025}},
}

@inproceedings{61234,
  abstract     = {{The ability to generate explanations that are understood by explainees is the quintessence of explainable artificial intelligence. Since understanding depends on the explainee's background and needs, recent research focused on co-constructive explanation dialogues, where an explainer continuously monitors the explainee's understanding and adapts their explanations dynamically. We investigate the ability of large language models (LLMs) to engage as explainers in co-constructive explanation dialogues. In particular, we present a user study in which explainees interact with an LLM in two settings, one of which involves the LLM being instructed to explain a topic co-constructively. We evaluate the explainees' understanding before and after the dialogue, as well as their perception of the LLMs' co-constructive behavior. Our results suggest that LLMs show some co-constructive behaviors, such as asking verification questions, that foster the explainees' engagement and can improve understanding of a topic. However, their ability to effectively monitor the current understanding and scaffold the explanations accordingly remains limited.}},
  author       = {{Fichtel, Leandra and Spliethöver, Maximilian and Hüllermeier, Eyke and Jimenez, Patricia and Klowait, Nils and Kopp, Stefan and Ngonga Ngomo, Axel-Cyrille and Robrecht, Amelie and Scharlau, Ingrid and Terfloth, Lutz and Vollmer, Anna-Lisa and Wachsmuth, Henning}},
  booktitle    = {{Proceedings of the 26th Annual Meeting of the Special Interest Group on Discourse and Dialogue}},
  publisher    = {{Association for Computational Linguistics}},
  title        = {{{Investigating Co-Constructive Behavior of Large Language Models in Explanation Dialogues}}},
  year         = {{2025}},
}

@inproceedings{59856,
  abstract     = {{Recent advances in instruction fine-tuning have led to the development of various prompting techniques for large language models, such as explicit reasoning steps. However, the success of these techniques depends on various parameters, such as the task, language model, and context provided. Finding an effective prompt is, therefore, often a trial-and-error process. Most existing approaches to automatic prompting aim to optimize individual techniques instead of compositions of techniques and their dependence on the input. To fill this gap, we propose an adaptive prompting approach that predicts the optimal prompt composition ad hoc for a given input. We apply our approach to social bias detection, a highly context-dependent task that requires semantic understanding. We evaluate it with three large language models on three datasets, comparing compositions to individual techniques and other baselines. The results underline the importance of finding an effective prompt composition. Our approach robustly ensures high detection performance and is best in several settings. Moreover, initial experiments on other tasks support its generalizability.}},
  author       = {{Spliethöver, Maximilian and Knebler, Tim and Fumagalli, Fabian and Muschalik, Maximilian and Hammer, Barbara and Hüllermeier, Eyke and Wachsmuth, Henning}},
  booktitle    = {{Proceedings of the 2025 Conference of the Nations of the Americas Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers)}},
  editor       = {{Chiruzzo, Luis and Ritter, Alan and Wang, Lu}},
  isbn         = {{979-8-89176-189-6}},
  pages        = {{2421–2449}},
  publisher    = {{Association for Computational Linguistics}},
  title        = {{{Adaptive Prompting: Ad-hoc Prompt Composition for Social Bias Detection}}},
  year         = {{2025}},
}

@book{61178,
  editor       = {{Ilinykh, Nikolai and Robrecht, Amelie and Kopp, Stefan and Buschmeier, Hendrik}},
  issn         = {{2308-2275}},
  location     = {{Bielefeld, Germany}},
  pages        = {{viii+271}},
  title        = {{{SemDial 2025 – Bialogue. Proceedings of the 29th Workshop on the Semantics and Pragmatics of Dialogue}}},
  year         = {{2025}},
}

@inproceedings{61243,
  author       = {{Fisher, Josephine Beryl and Terfloth, Lutz}},
  booktitle    = {{Proceedings of the 29th Workshop on the Semantics and Pragmatics of Dialogue (SemDial 2025)}},
  title        = {{{The Dual Nature as a Local Context to Explore Verbal Behaviour in Game Explanations}}},
  year         = {{2025}},
}

@unpublished{61294,
  abstract     = {{Human-AI collaboration is increasingly promoted to improve high-stakes decision-making, yet its benefits have not been fully realized. Application-grounded evaluations are needed to better evaluate methods for improving collaboration but often require domain experts, making studies costly and limiting their generalizability. Current evaluation methods are constrained by limited public datasets and reliance on proxy tasks. To address these challenges, we propose an application-grounded framework for large-scale, online evaluations of vision-based decision-making tasks. The framework introduces Blockies, a parametric approach for generating datasets of simulated diagnostic tasks, offering control over the traits and biases in the data used to train real-world models. These tasks are designed to be easy to learn but difficult to master, enabling participation by non-experts. The framework also incorporates storytelling and monetary incentives to manipulate perceived task stakes. An initial empirical study demonstrated that the high-stakes condition significantly reduced healthy distrust of AI, despite longer decision-making times. These findings underscore the importance of perceived stakes in fostering healthy distrust and demonstrate the framework's potential for scalable evaluation of high-stakes Human-AI collaboration. }},
  author       = {{Johnson, David S.}},
  title        = {{{Higher Stakes, Healthier Trust? An Application-Grounded Approach to Assessing Healthy Trust in High-Stakes Human-AI Collaboration}}},
  year         = {{2025}},
}

@inproceedings{61421,
  author       = {{Battefeld, Dominik and Kopp, Stefan}},
  booktitle    = {{Proceedings of KogWis 2025: Conference of the German Cognitive Science Society}},
  location     = {{Bochum, Germany}},
  title        = {{{Implementing a computational cognitive process model of medical diagnostic reasoning}}},
  year         = {{2025}},
}

@inproceedings{61432,
  abstract     = {{This study investigated how action histories – unfolding sequences of actions with objects – provide a context for both attentional allocation and linguistic repair strategies. Building on theories of enactive cognition and sensorimotor contingency theory, we experimentally manipulated action sequences (action history) to create either simple or rich “situational models,” and investigated how these models interact with attention and are reflected in linguistic processes during human–robot interaction. Participants (N = 30) engaged in a controlled object placement task with a humanoid robot, where the action (manner) information was either provided or omitted. The omission elicited repair behaviors in participants, which were the focus of our investigation. For rich models (competing action possibilities), participants demonstrated: a) increased attentional reorientation, reflecting active engagement with the situational model, and b) a preference for restricted repairs, targeting the specific source of trouble in action selection. Conversely, a simple situational model led to more generalized attention patterns and open repair strategies, suggesting weaker constraints on internal processing. These findings highlight how situational structures emerge externally to scaffold internal cognitive processes, with action histories serving as a crucial context for the interface between perception, action, and language. We discuss how to implement such a tight loop in an assistance system.}},
  author       = {{Singh, Amit and Rohlfing, Katharina J.}},
  booktitle    = {{IEEE International Conference on Development and Learning (ICDL)}},
  keywords     = {{Attention, Action, Repairs, Task model, HRI, Eye movement}},
  location     = {{Prague, Czech Republic}},
  title        = {{{Manners Matter: Action history guides attention and repair choices during interaction}}},
  doi          = {{10.31234/osf.io/yn2we_v1}},
  year         = {{2025}},
}

@misc{61429,
  author       = {{Buschmeier, Hendrik and Grimminger, Angela and Wagner, Petra and Lazarov, Stefan Teodorov and Türk, Olcay and Wang, Yu}},
  publisher    = {{Zenodo}},
  title        = {{{MUNDEX Annotations}}},
  doi          = {{10.5281/zenodo.17129817}},
  year         = {{2025}},
}

@inproceedings{61401,
  abstract     = {{We introduce a method to study online language processes in a human–robot interactive setup, in which language-mediated eye movements can be studied as the dialogue unfolds between a human and a robot. Traditionally, real-time linguistic processes are studied using visual world paradigms (VWP), where either comprehension or production tasks are implemented on screens for controlled investigations. Going beyond this traditional and unidirectional approach, we bring together the production–comprehension loop with the help of a humanoid robot to preserve interactivity in an ecologically valid yet controlled setup. We discuss the potential of such setups for designing and evaluating findings on the language–vision interplay in psycholinguistics. Our setup shows the potential to depart from traditional screen-based experiments, balancing the dynamics of the interaction with control of the human behaviors.}},
  author       = {{Singh, Amit and Rohlfing, Katharina J.}},
  booktitle    = {{6th Biannual Conference of the German Society for Cognitive Science}},
  location     = {{Bochum, Germany}},
  title        = {{{Embedding Psycholinguistics: An Interactive Framework for Studying Language in Action}}},
  doi          = {{10.17605/OSF.IO/8PR23}},
  year         = {{2025}},
}

@inbook{62305,
  author       = {{Reijers, Wessel and Matzner, Tobias and Alpsancar, Suzana}},
  booktitle    = {{Digital Development. Technology, Ethics and Governance}},
  editor       = {{Farina, Mirko and Yu, Xiao and Chen, Jin}},
  isbn         = {{9781003567622}},
  publisher    = {{Routledge}},
  title        = {{{Explainability and AI Governance}}},
  doi          = {{10.4324/9781003567622-22}},
  year         = {{2025}},
}

@article{61156,
  abstract     = {{Explainability has become an important topic in computer science and artificial intelligence, leading to a subfield called Explainable Artificial Intelligence (XAI). The goal of providing or seeking explanations is to achieve (better) ‘understanding’ on the part of the explainee. However, what it means to ‘understand’ is still not clearly defined, and the concept itself is rarely the subject of scientific investigation. This conceptual article aims to present a model of forms of understanding for XAI-explanations and beyond. From an interdisciplinary perspective bringing together computer science, linguistics, sociology, philosophy and psychology, a definition of understanding and its forms, assessment, and dynamics during the process of giving everyday explanations are explored. Two types of understanding are considered as possible outcomes of explanations, namely enabledness, ‘knowing how’ to do or decide something, and comprehension, ‘knowing that’ – both in different degrees (from shallow to deep). Explanations regularly start with shallow understanding in a specific domain and can lead to deep comprehension and enabledness of the explanandum, which we see as a prerequisite for human users to gain agency. In this process, the increase of comprehension and enabledness are highly interdependent. Against the background of this systematization, special challenges of understanding in XAI are discussed.}},
  author       = {{Buschmeier, Hendrik and Buhl, Heike M. and Kern, Friederike and Grimminger, Angela and Beierling, Helen and Fisher, Josephine Beryl and Groß, André and Horwath, Ilona and Klowait, Nils and Lazarov, Stefan Teodorov and Lenke, Michael and Lohmer, Vivien and Rohlfing, Katharina J. and Scharlau, Ingrid and Singh, Amit and Terfloth, Lutz and Vollmer, Anna-Lisa and Wang, Yu and Wilmes, Annedore and Wrede, Britta}},
  journal      = {{Cognitive Systems Research}},
  keywords     = {{understanding, explaining, explanations, explainable, AI, interdisciplinarity, comprehension, enabledness, agency}},
  title        = {{{Forms of Understanding for XAI-Explanations}}},
  doi          = {{10.1016/j.cogsys.2025.101419}},
  volume       = {{94}},
  year         = {{2025}},
}

@inproceedings{53069,
  author       = {{Banh, Ngoc Chi and Scharlau, Ingrid}},
  location     = {{Regensburg, Germany}},
  title        = {{{Effects of task difficulty on visual processing speed}}},
  year         = {{2024}},
}

@inproceedings{54889,
  abstract     = {{To reach the goal of zero traffic fatalities a year, one building block is the proposition to develop advanced assistance systems for vulnerable road users (VRUs) such as bicyclists. We focus on the dooring problem, i.e., car doors being opened inattentively in the path of an approaching cyclist. We extended our vehicle-to-everything (V2X) communication-enabled virtual cycling environment for dooring experiments. Our system extends toolkits that are widely used in the V2X research community. We showcase how such a system may be used to realize and evaluate distributed algorithms for VRU safety solutions such as dooring prevention.}},
  author       = {{Stratmann, Lukas and Banh, Ngoc Chi and Scharlau, Ingrid and Dressler, Falko}},
  booktitle    = {{ACM Symposium on Principles of Distributed Computing (PODC 2024), Advanced tools, programming languages, and PLatforms for Implementing and Evaluating algorithms for Distributed systems (ApPLIED 2024)}},
  publisher    = {{Association for Computing Machinery (ACM)}},
  title        = {{{Safety Assistance Systems for Bicyclists: Toward Empirical Studies of the Dooring Problem}}},
  doi          = {{10.1145/3663338.3665831}},
  year         = {{2024}},
}

@article{54911,
  author       = {{Heid, Stefan and Hanselle, Jonas Manuel and Fürnkranz, Johannes and Hüllermeier, Eyke}},
  issn         = {{0888-613X}},
  journal      = {{International Journal of Approximate Reasoning}},
  publisher    = {{Elsevier BV}},
  title        = {{{Learning decision catalogues for situated decision making: The case of scoring systems}}},
  doi          = {{10.1016/j.ijar.2024.109190}},
  volume       = {{171}},
  year         = {{2024}},
}