@inproceedings{56479,
  abstract     = {{While the importance of explainable artificial intelligence in high-stakes decision-making is widely recognized in the existing literature, empirical studies assessing users' perceived value of explanations are scarce. In this paper, we aim to address this shortcoming by conducting an empirical study focused on measuring the perceived value of the following types of explanations: plain explanations based on feature attribution, counterfactual explanations, and complex counterfactual explanations. We measure an explanation's value using five dimensions: perceived accuracy, understandability, plausibility, sufficiency of detail, and user satisfaction. Our findings indicate a sweet spot of explanation complexity, with both dimensional and structural complexity positively impacting the perceived value up to a certain threshold.}},
  author       = {{Liedeker, Felix and Düsing, Christoph and Nieveler, Marcel and Cimiano, Philipp}},
  keywords     = {{XAI, Explanation Complexity, User Perception}},
  location     = {{Valletta, Malta}},
  title        = {{{An Empirical Investigation of Users' Assessment of XAI Explanations: Identifying the Sweet-Spot of Explanation Complexity}}},
  year         = {{2024}},
}

@inproceedings{56660,
  abstract     = {{In a successful dialogue in general, and a successful explanation in particular, partners need to account for both the task model (what is relevant for the task) and the partner model (what one can contribute). The coupling between the task and the partner model becomes especially interesting in the context of Human–Robot Interaction, where humans have to deal with the unknown capabilities of the robot, which become momentarily perceivable when the robot is unable to contribute to the task. Following research on path over manner prominence in an action [31–33], a robot explained actions to a human by emphasizing two aspects: the path ("where" component) and the manner ("how" component). On critical trials, the robot occasionally omitted one of these components, after which participants sought the missing information about the path or the manner. Participants’ information-seeking and gaze behaviour were analysed. The analysis confirms the initial predictions for a) the task model (path over manner prominence), i.e., earlier information-seeking on path-missing than on manner-missing trials, and b) the partner model, i.e., while information-seeking is predominantly tied to attention on the robot’s face, when the robot fails to provide a resolution, attention shifts more often towards its torso, a behaviour likely indicating an exploration of the robot’s capabilities. An individual-level analysis further confirms that the intra-individual variation in the task model is partly influenced by the perceived capability of the robot.}},
  author       = {{Singh, Amit and Rohlfing, Katharina J.}},
  booktitle    = {{Proceedings of the 26th ACM International Conference on Multimodal Interaction (ICMI 2024)}},
  keywords     = {{Explanation, Scaffolding, Eyetracking, Partner Model, HRI}},
  location     = {{San Jose, Costa Rica}},
  title        = {{{Coupling of Task and Partner Model: Investigating the Intra-Individual Variability in Gaze during Human–Robot Explanatory Dialogue}}},
  doi          = {{10.1145/3686215.3689202}},
  year         = {{2024}},
}

@inproceedings{57204,
  abstract     = {{In this study on the use of gesture deixis during explanations, a sample of 24 video-recorded dyadic interactions of a board game explanation was analyzed. The relation between different explainers' use of gesture deixis and their interpretation of explainees' understanding was investigated. In addition, we describe explainers' intra-individual variations across their consecutive interactions with three different explainees. While we did not find a relation between interpretations of explainees' complete understanding and a decrease in explainers' use of gesture deixis, we demonstrated that the overall use of gesture deixis is related to the process of interactional monitoring and to attending to a different explainee.}},
  author       = {{Lazarov, Stefan Teodorov and Grimminger, Angela}},
  booktitle    = {{Proceedings of the Annual Meeting of the Cognitive Science Society}},
  keywords     = {{explanation, gesture deixis, monitoring, understanding}},
  location     = {{Rotterdam, The Netherlands}},
  title        = {{{Variations in explainers’ gesture deixis in explanations related to the monitoring of explainees’ understanding}}},
  volume       = {{46}},
  year         = {{2024}},
}

@article{58109,
  abstract     = {{The present study aims to understand how metaphors are used in explanations. According to many current theories, metaphors have a conceptual function for the understanding of abstract objects. From this theoretical assumption, we derived the hypothesis that the lower the expertise of the addressee of an explanation, the more metaphors should be used. We tested this hypothesis on a relatively natural data set of 24 published videos with close to 100,000 words overall, in which experts explain abstract, mostly scientific concepts to persons of different expertise, varying from minimal (children) to profound (experts). Contrary to our expectations, the frequency of metaphors did not decrease with expertise but actually increased. This increase could be statistically substantiated for larger differences in expertise. The study contributes to a better understanding of the use of metaphors in actual explanatory processes and of how metaphor use depends on contextual factors. It thus supports the expansion of the conceptual and linguistic perspective on metaphors to include the aspect of how metaphors are used by speakers.}},
  author       = {{Scharlau, Ingrid and Körber, Miriam and Sengupta, Meghdut and Wachsmuth, Henning}},
  journal      = {{Frontiers in Language Sciences}},
  keywords     = {{metaphor, conceptual metaphor, conceptual metaphor theory, metaphor usage, explaining, explanation}},
  pages        = {{1474924}},
  title        = {{{When to use a metaphor: Metaphors in dialogical explanations with addressees of different expertise}}},
  volume       = {{3}},
  year         = {{2024}},
}

@inproceedings{61273,
  abstract     = {{In human-machine explanation interactions, such as tutoring systems or customer support chatbots, it is important for the machine explainer to infer the human user's understanding. Nonverbal signals play an important role in expressing mental states like understanding and confusion in these interactions. However, an individual's expressions may vary depending on other factors. In cases where these factors are unknown, machine learning methods that infer understanding from nonverbal cues become unreliable. Stress, for example, has been shown to affect human expression, but it is not clear from current research how stress affects the expression of understanding. To address this gap, we design a paradigm that induces understanding and confusion through game rule explanations. During the explanations, self-perceived understanding and confusion are annotated by the participants. A stress condition is also introduced to enable the investigation of changes in the expression of social signals under stress. We conducted a study to validate the stress induction, and participants reported a statistically significant increase in stress during the stress condition compared to the neutral control condition. Additionally, feedback from participants shows that the paradigm is effective in inducing understanding and confusion. This paradigm paves the way for further studies investigating social signals of understanding to improve human-machine explanation interactions across varying contexts.}},
  author       = {{Paletschek, Jonas}},
  booktitle    = {{Proceedings of the 12th International Conference on Affective Computing and Intelligent Interaction (ACII 2024)}},
  keywords     = {{Understanding, Nonverbal Social Signals, Stress Induction, Explanation, Machine Learning Bias}},
  location     = {{Glasgow, UK}},
  publisher    = {{IEEE}},
  title        = {{{A Paradigm to Investigate Social Signals of Understanding and Their Susceptibility to Stress}}},
  doi          = {{10.1109/ACII63134.2024.00040}},
  year         = {{2024}},
}

@article{51368,
  abstract     = {{When dealing with opaque algorithms, the frequent overlap between transparency and explainability produces seemingly unsolvable dilemmas, such as the much-discussed trade-off between model performance and model transparency. Referring to Niklas Luhmann's notion of communication, the paper argues that explainability does not necessarily require transparency and proposes an alternative approach. Explanations as communicative processes do not imply any disclosure of thoughts or neural processes, but only reformulations that provide the partners with additional elements and enable them to understand (from their perspective) what has been done and why. Recent computational approaches aiming at post-hoc explainability reproduce what happens in communication, producing explanations of the workings of algorithms that can differ from the processes of the algorithms themselves.}},
  author       = {{Esposito, Elena}},
  journal      = {{Sociologica}},
  keywords     = {{Explainable AI, Transparency, Explanation, Communication, Sociological systems theory}},
  number       = {{3}},
  pages        = {{17--27}},
  title        = {{{Does Explainability Require Transparency?}}},
  doi          = {{10.6092/ISSN.1971-8853/15804}},
  volume       = {{16}},
  year         = {{2023}},
}

@article{51369,
  abstract     = {{This short introduction presents the symposium ‘Explaining Machines’. It situates the debate about Explainable AI within the history of reflection on AI and outlines the issues discussed in the contributions.}},
  author       = {{Esposito, Elena}},
  journal      = {{Sociologica}},
  keywords     = {{Explainable AI, Inexplicability, Transparency, Explanation, Opacity, Contestability}},
  number       = {{3}},
  pages        = {{1--4}},
  title        = {{{Explaining Machines: Social Management of Incomprehensible Algorithms. Introduction}}},
  doi          = {{10.6092/ISSN.1971-8853/16265}},
  volume       = {{16}},
  year         = {{2023}},
}

