@unpublished{51372,
  abstract     = {{Machine learning is frequently used in affective computing, but presents challenges due to the opacity of state-of-the-art machine learning methods. Because of the impact affective machine learning systems may have on an individual's life, it is important that models be made transparent to detect and mitigate biased decision making. In this regard, affective machine learning could benefit from the recent advancements in explainable artificial intelligence (XAI) research. We perform a structured literature review to examine the use of interpretability in the context of affective machine learning. We focus on studies using audio, visual, or audiovisual data for model training and identify 29 research articles. Our findings show an emergence of the use of interpretability methods in the last five years. However, their use is currently limited regarding the range of methods used, the depth of evaluations, and the consideration of use-cases. We outline the main gaps in the research and provide recommendations for researchers that aim to implement interpretable methods for affective machine learning.}},
  author       = {{Johnson, David and Hakobyan, Olya and Drimalla, Hanna}},
  title        = {{{Towards Interpretability in Audio and Visual Affective Machine Learning: A Review}}},
  year         = {{2023}},
}

@article{51371,
  abstract     = {{In this paper, we investigate the effect of distractions and hesitations as a scaffolding strategy. Recent research points to the potential beneficial effects of a speaker’s hesitations on the listeners’ comprehension of utterances, although results from studies on this issue indicate that humans do not make strategic use of them. The role of hesitations and their communicative function in human-human interaction is a much-discussed topic in current research. To better understand the underlying cognitive processes, we developed a human–robot interaction (HRI) setup that allows the measurement of the electroencephalogram (EEG) signals of a human participant while interacting with a robot. We thereby address the research question of whether we find effects on single-trial EEG based on the distraction and the corresponding robot’s hesitation scaffolding strategy. To carry out the experiments, we leverage our LabLinking method, which enables interdisciplinary joint research between remote labs. This study could not have been conducted without LabLinking, as the two involved labs needed to combine their individual expertise and equipment to achieve the goal together. The results of our study indicate that the EEG correlates in the distracted condition are different from the baseline condition without distractions. Furthermore, we could differentiate the EEG correlates of distraction with and without a hesitation scaffolding strategy. This proof-of-concept study shows that LabLinking makes it possible to conduct collaborative HRI studies in remote laboratories and lays the first foundation for more in-depth research into robotic scaffolding strategies.}},
  author       = {{Richter, Birte and Putze, Felix and Ivucic, Gabriel and Brandt, Mara and Schütze, Christian and Reisenhofer, Rafael and Wrede, Britta and Schultz, Tanja}},
  issn         = {{2414-4088}},
  journal      = {{Multimodal Technologies and Interaction}},
  keywords     = {{Computer Networks and Communications, Computer Science Applications, Human-Computer Interaction, Neuroscience (miscellaneous)}},
  number       = {{4}},
  publisher    = {{MDPI AG}},
  title        = {{{EEG Correlates of Distractions and Hesitations in Human–Robot Interaction: A LabLinking Pilot Study}}},
  doi          = {{10.3390/mti7040037}},
  volume       = {{7}},
  year         = {{2023}},
}

@inproceedings{51370,
  author       = {{Dyck, Leonie and Beierling, Helen and Helmert, Robin and Vollmer, Anna-Lisa}},
  booktitle    = {{Companion of the 2023 ACM/IEEE International Conference on Human-Robot Interaction}},
  location     = {{Stockholm}},
  pages        = {{720--724}},
  publisher    = {{ACM}},
  title        = {{{Technical Transparency for Robot Navigation Through AR Visualizations}}},
  doi          = {{10.1145/3568294.3580181}},
  year         = {{2023}},
}

@article{51368,
  abstract     = {{Dealing with opaque algorithms, the frequent overlap between transparency and explainability produces seemingly unsolvable dilemmas, such as the much-discussed trade-off between model performance and model transparency. Referring to Niklas Luhmann's notion of communication, the paper argues that explainability does not necessarily require transparency and proposes an alternative approach. Explanations as communicative processes do not imply any disclosure of thoughts or neural processes, but only reformulations that provide the partners with additional elements and enable them to understand (from their perspective) what has been done and why. Recent computational approaches aiming at post-hoc explainability reproduce what happens in communication, producing explanations of the working of algorithms that can be different from the processes of the algorithms.}},
  author       = {{Esposito, Elena}},
  journal      = {{Sociologica}},
  keywords     = {{Explainable AI, Transparency, Explanation, Communication, Sociological systems theory}},
  number       = {{3}},
  pages        = {{17--27}},
  title        = {{{Does Explainability Require Transparency?}}},
  doi          = {{10.6092/ISSN.1971-8853/15804}},
  volume       = {{16}},
  year         = {{2023}},
}

@article{51369,
  abstract     = {{This short introduction presents the symposium ‘Explaining Machines’. It locates the debate about Explainable AI in the history of reflection on AI and outlines the issues discussed in the contributions.}},
  author       = {{Esposito, Elena}},
  journal      = {{Sociologica}},
  keywords     = {{Explainable AI, Inexplicability, Transparency, Explanation, Opacity, Contestability}},
  number       = {{3}},
  pages        = {{1--4}},
  title        = {{{Explaining Machines: Social Management of Incomprehensible Algorithms. Introduction}}},
  doi          = {{10.6092/ISSN.1971-8853/16265}},
  volume       = {{16}},
  year         = {{2023}},
}

@inproceedings{44849,
  author       = {{Rautenberg, Frederik and Kuhlmann, Michael and Ebbers, Janek and Wiechmann, Jana and Seebauer, Fritz and Wagner, Petra and Haeb-Umbach, Reinhold}},
  booktitle    = {{Fortschritte der Akustik - DAGA 2023}},
  location     = {{Hamburg}},
  pages        = {{1409--1412}},
  title        = {{{Speech Disentanglement for Analysis and Modification of Acoustic and Perceptual Speaker Characteristics}}},
  year         = {{2023}},
}

@inbook{54909,
  author       = {{Hanselle, Jonas Manuel and Fürnkranz, Johannes and Hüllermeier, Eyke}},
  booktitle    = {{Discovery Science}},
  isbn         = {{9783031452741}},
  issn         = {{0302-9743}},
  publisher    = {{Springer Nature Switzerland}},
  title        = {{{Probabilistic Scoring Lists for Interpretable Machine Learning}}},
  doi          = {{10.1007/978-3-031-45275-8_13}},
  year         = {{2023}},
}

@inproceedings{55152,
  author       = {{Robrecht, Amelie and Rothgänger, Markus and Kopp, Stefan}},
  booktitle    = {{Proceedings of the 23rd ACM International Conference on Intelligent Virtual Agents}},
  publisher    = {{ACM}},
  title        = {{{A Study on the Benefits and Drawbacks of Adaptivity in AI-generated Explanations}}},
  doi          = {{10.1145/3570945.3607339}},
  year         = {{2023}},
}

@inproceedings{55406,
  abstract     = {{Metaphorical language, such as “spending time together”, projects meaning from a source domain (here, \emph{money}) to a target domain (\emph{time}). Thereby, it highlights certain aspects of the target domain, such as the \emph{effort} behind the time investment. Highlighting aspects with metaphors (while hiding others) bridges the two domains and is the core of metaphorical meaning construction. For metaphor interpretation, linguistic theories stress that identifying the highlighted aspects is important for a better understanding of metaphors. However, metaphor research in NLP has not yet dealt with the phenomenon of highlighting. In this paper, we introduce the task of identifying the main aspect highlighted in a metaphorical sentence. Given the inherent interaction of source domains and highlighted aspects, we propose two multitask approaches - a joint learning approach and a continual learning approach - based on a finetuned contrastive learning model to jointly predict highlighted aspects and source domains. We further investigate whether (predicted) information about a source domain leads to better performance in predicting the highlighted aspects, and vice versa. Our experiments on an existing corpus suggest that, given the corresponding information, the model accuracy in predicting highlighted aspects and source domains improves notably compared to the single-task baselines.}},
  author       = {{Sengupta, Meghdut and Alshomary, Milad and Scharlau, Ingrid and Wachsmuth, Henning}},
  booktitle    = {{Findings of the Association for Computational Linguistics: EMNLP 2023}},
  editor       = {{Bouamor, Houda and Pino, Juan and Bali, Kalika}},
  pages        = {{4636--4659}},
  publisher    = {{Association for Computational Linguistics}},
  title        = {{{Modeling Highlighting of Metaphors in Multitask Contrastive Learning Paradigms}}},
  doi          = {{10.18653/v1/2023.findings-emnlp.308}},
  year         = {{2023}},
}

@inproceedings{51767,
  author       = {{Beer, Fabian and Schulz, Christian}},
  booktitle    = {{4S Conference (Society for the Social Studies of Science), Honolulu/Hawaii, November 9}},
  title        = {{{The Return of Black Box Theory in Explainable AI}}},
  year         = {{2023}},
}

@inproceedings{51766,
  author       = {{Schulz, Christian and Wilmes, Annedore}},
  title        = {{{Vernacular Metaphors of AI}}},
  year         = {{2023}},
}

@inproceedings{46067,
  abstract     = {{The study investigates two different ways of guiding the addressee of an explanation - an explainee - through action demonstration: contrastive and non-contrastive. Their effect was tested on attention to specific action elements (goal) as well as on event memory. In an eye-tracking experiment, participants were shown different motion videos that were either contrastive or non-contrastive with respect to the segments of movement presentation. Given that everyday action demonstration is often multimodal, the stimuli were created with respect to their visual and verbal presentation. For visual presentation, a video combined two movements in a contrastive (e.g., Up-motion following a Down-motion) or non-contrastive way (e.g., two Up-motions following each other). For verbal presentation, each video was combined with a sequence of instruction descriptions in the form of negative (i.e., contrastive) or assertive (i.e., non-contrastive) guidance. It was found that a) attention to the event goal increased for this condition in the later time window, and b) participants’ recall of the event was facilitated when a visually contrastive motion was combined with a verbal contrast.}},
  author       = {{Singh, Amit and Rohlfing, Katharina J.}},
  booktitle    = {{Proceedings of the 45th Annual Meeting of the Cognitive Science Society}},
  keywords     = {{Attention, negation, contrastive guidance, eye-movements, action understanding, event representation}},
  location     = {{Sydney}},
  publisher    = {{Cognitive Science Society}},
  title        = {{{Contrastiveness in the context of action demonstration: an eye-tracking study on its effects on action perception and action recall}}},
  year         = {{2023}},
}

@inproceedings{56477,
  abstract     = {{We describe a prototype of a Clinical Decision Support System (CDSS) that provides (counterfactual) explanations to support accurate medical diagnosis. The prototype is based on an inherently interpretable Bayesian network (BN). Our research aims to investigate which explanations are most useful for medical experts and whether co-constructing explanations can foster trust and acceptance of CDSS.}},
  author       = {{Liedeker, Felix and Cimiano, Philipp}},
  keywords     = {{Explainable AI, Clinical decision support, Bayesian network, Counterfactual explanations}},
  location     = {{Lisbon}},
  title        = {{{A Prototype of an Interactive Clinical Decision Support System with Counterfactual Explanations}}},
  year         = {{2023}},
}

@inproceedings{56478,
  author       = {{Liedeker, Felix and Cimiano, Philipp}},
  location     = {{Breckenridge, CO, USA}},
  title        = {{{Dynamic Feature Selection in AI-based Diagnostic Decision Support for Epilepsy}}},
  year         = {{2023}},
}

@unpublished{56663,
  abstract     = {{Explainability has become an important topic in computer science and artificial intelligence, leading to a subfield called Explainable Artificial Intelligence (XAI). The goal of providing or seeking explanations is to achieve (better) 'understanding' on the part of the explainee. However, what it means to 'understand' is still not clearly defined, and the concept itself is rarely the subject of scientific investigation. This conceptual article aims to present a model of forms of understanding in the context of XAI and beyond. From an interdisciplinary perspective bringing together computer science, linguistics, sociology, and psychology, a definition of understanding and its forms, assessment, and dynamics during the process of giving everyday explanations are explored. Two types of understanding are considered as possible outcomes of explanations, namely enabledness, 'knowing how' to do or decide something, and comprehension, 'knowing that' -- both in different degrees (from shallow to deep). Explanations regularly start with shallow understanding in a specific domain and can lead to deep comprehension and enabledness of the explanandum, which we see as a prerequisite for human users to gain agency. In this process, the increases of comprehension and enabledness are highly interdependent. Against the background of this systematization, special challenges of understanding in XAI are discussed.}},
  author       = {{Buschmeier, Hendrik and Buhl, Heike M. and Kern, Friederike and Grimminger, Angela and Beierling, Helen and Fisher, Josephine and Groß, André and Horwath, Ilona and Klowait, Nils and Lazarov, Stefan and Lenke, Michael and Lohmer, Vivien and Rohlfing, Katharina and Scharlau, Ingrid and Singh, Amit and Terfloth, Lutz and Vollmer, Anna-Lisa and Wang, Yu and Wilmes, Annedore and Wrede, Britta}},
  note         = {{arXiv:2311.08760}},
  title        = {{{Forms of Understanding of XAI-Explanations}}},
  year         = {{2023}},
}

@inproceedings{51367,
  author       = {{Robrecht, Amelie and Kopp, Stefan}},
  booktitle    = {{Proceedings of the 15th International Conference on Agents and Artificial Intelligence}},
  isbn         = {{978-989-758-623-1}},
  location     = {{Lisbon}},
  pages        = {{48--58}},
  publisher    = {{SCITEPRESS - Science and Technology Publications}},
  title        = {{{SNAPE: A Sequential Non-Stationary Decision Process Model for Adaptive Explanation Generation}}},
  doi          = {{10.5220/0011671300003393}},
  year         = {{2023}},
}

@inproceedings{55156,
  author       = {{Fisher, Josephine Beryl and Robrecht, Amelie and Kopp, Stefan and Rohlfing, Katharina J.}},
  booktitle    = {{Proceedings of the 27th Workshop on the Semantics and Pragmatics of Dialogue }},
  location     = {{Maribor}},
  title        = {{{Exploring the Semantic Dialogue Patterns of Explanations – a Case Study of Game Explanations}}},
  year         = {{2023}},
}

@article{50262,
  abstract     = {{Explainable artificial intelligence has mainly focused on static learning scenarios so far. We are interested in dynamic scenarios where data is sampled progressively, and learning is done in an incremental rather than a batch mode. We seek efficient incremental algorithms for computing feature importance (FI). Permutation feature importance (PFI) is a well-established model-agnostic measure to obtain global FI based on feature marginalization of absent features. We propose an efficient, model-agnostic algorithm called iPFI to estimate this measure incrementally and under dynamic modeling conditions including concept drift. We prove theoretical guarantees on the approximation quality in terms of expectation and variance. To validate our theoretical findings and the efficacy of our approaches in incremental scenarios dealing with streaming data rather than traditional batch settings, we conduct multiple experimental studies on benchmark data with and without concept drift.}},
  author       = {{Fumagalli, Fabian and Muschalik, Maximilian and Hüllermeier, Eyke and Hammer, Barbara}},
  issn         = {{0885-6125}},
  journal      = {{Machine Learning}},
  keywords     = {{Artificial Intelligence, Software}},
  number       = {{12}},
  pages        = {{4863--4903}},
  publisher    = {{Springer Science and Business Media LLC}},
  title        = {{{Incremental permutation feature importance (iPFI): towards online explanations on data streams}}},
  doi          = {{10.1007/s10994-023-06385-y}},
  volume       = {{112}},
  year         = {{2023}},
}

@inproceedings{58723,
  abstract     = {{In real-world debates, the most common way to counter an argument is to reason against its main point, that is, its conclusion. Existing work on the automatic generation of natural language counter-arguments does not address the relation to the conclusion, possibly because many arguments leave their conclusion implicit. In this paper, we hypothesize that the key to effective counter-argument generation is to explicitly model the argument’s conclusion and to ensure that the stance of the generated counter is opposite to that conclusion. In particular, we propose a multitask approach that jointly learns to generate both the conclusion and the counter of an input argument. The approach employs a stance-based ranking component that selects the counter from a diverse set of generated candidates whose stance best opposes the generated conclusion. In both automatic and manual evaluation, we provide evidence that our approach generates more relevant and stance-adhering counters than strong baselines.}},
  author       = {{Alshomary, Milad and Wachsmuth, Henning}},
  booktitle    = {{Proceedings of the 17th Conference of the European Chapter of the Association for Computational Linguistics}},
  editor       = {{Vlachos, Andreas and Augenstein, Isabelle}},
  pages        = {{957--967}},
  publisher    = {{Association for Computational Linguistics}},
  title        = {{{Conclusion-based Counter-Argument Generation}}},
  doi          = {{10.18653/v1/2023.eacl-main.67}},
  year         = {{2023}},
}

