@inproceedings{47448,
  abstract     = {{In XAI it is important to consider that, in contrast to explanations for professional audiences, one cannot assume common expertise when explaining for laypeople. But such explanations between humans vary greatly, making it difficult to research commonalities across explanations. We used the dual nature theory, a techno-philosophical approach, to cope with these challenges. According to it, one can explain, for example, an XAI's decision by addressing its dual nature: by focusing on the Architecture (e.g., the logic of its algorithms) or the Relevance (e.g., the severity of a decision, the implications of a recommendation). We investigated 20 game explanations using the theory as an analytical framework. We elaborate how we used the theory to quickly structure and compare explanations of technological artifacts. We supplemented results from analyzing the explanation contents with results from a video recall to explore how explainers justified their explanation. We found that explainers were focusing on the physical aspects of the game first (Architecture) and only later on aspects of the Relevance. Reasoning in the video recalls indicated that EX regarded the focus on the Architecture as important for structuring the explanation initially by explaining the basic components before focusing on more complex, intangible aspects. Shifting between addressing the two sides was justified by explanation goals, emerging misunderstandings, and the knowledge needs of the explainee. We discovered several commonalities that inspire future research questions which, if further generalizable, provide first ideas for the construction of synthetic explanations.}},
  author       = {{Terfloth, Lutz and Schaffer, Michael and Buhl, Heike M. and Schulte, Carsten}},
  booktitle    = {{Proceedings of the World Conference on Explainable Artificial Intelligence (xAI)}},
  isbn         = {{978-3-031-44069-4}},
  location     = {{Lisboa}},
  publisher    = {{Springer, Cham}},
  title        = {{{Adding Why to What? Analyses of an Everyday Explanation}}},
  doi          = {{10.1007/978-3-031-44070-0_13}},
  year         = {{2023}},
}

@inproceedings{55909,
  abstract     = {{It is generally assumed that language (written and spoken) follows the entropy rate constancy (ERC) principle, which states that the information density of a text is constant over time. Recently, this has also been found for nonverbal gestures used in monologue, but it is still unclear whether the ERC principle also applies to listeners' nonverbal signals. We focus on listeners' gaze behaviour extracted from video-recorded conversations and trained a transformer-based neural sequence model to process the gaze data of the dialogues and compute its information density. We also compute the information density of the corresponding speech using a pre-trained language model. Our results show (1) that listeners' gaze behaviour in dialogues roughly follows the ERC principle, as well as (2) a congruence between information density of speech and listeners' gaze behaviour.}},
  author       = {{Wang, Yu and Buschmeier, Hendrik}},
  booktitle    = {{Findings of the Association for Computational Linguistics: EMNLP 2023}},
  location     = {{Singapore}},
  pages        = {{15372--15379}},
  title        = {{{Does listener gaze in face-to-face interaction follow the Entropy Rate Constancy principle: An empirical study}}},
  year         = {{2023}},
}

@inproceedings{48778,
  author       = {{Muschalik, Maximilian and Fumagalli, Fabian and Jagtani, Rohit and Hammer, Barbara and Hüllermeier, Eyke}},
  booktitle    = {{Proceedings of the World Conference on Explainable Artificial Intelligence (xAI)}},
  isbn         = {{9783031440632}},
  title        = {{{iPDP: On Partial Dependence Plots in Dynamic Modeling Scenarios}}},
  doi          = {{10.1007/978-3-031-44064-9_11}},
  year         = {{2023}},
}

@inproceedings{48776,
  author       = {{Muschalik, Maximilian and Fumagalli, Fabian and Hammer, Barbara and Hüllermeier, Eyke}},
  booktitle    = {{Machine Learning and Knowledge Discovery in Databases: Research Track - European Conference (ECML PKDD)}},
  isbn         = {{9783031434174}},
  issn         = {{1611-3349}},
  publisher    = {{Springer Nature Switzerland}},
  title        = {{{iSAGE: An Incremental Version of SAGE for Online Explanation on Data Streams}}},
  doi          = {{10.1007/978-3-031-43418-1_26}},
  year         = {{2023}},
}

@inproceedings{48775,
  author       = {{Fumagalli, Fabian and Muschalik, Maximilian and Hüllermeier, Eyke and Hammer, Barbara}},
  booktitle    = {{Proceedings of the European Symposium on Artificial Neural Networks (ESANN)}},
  location     = {{Bruges (Belgium) and online}},
  title        = {{{On Feature Removal for Explainability in Dynamic Environments}}},
  doi          = {{10.14428/ESANN/2023.ES2023-148}},
  year         = {{2023}},
}

@inproceedings{52230,
  author       = {{Fumagalli, Fabian and Muschalik, Maximilian and Kolpaczki, Patrick and Hüllermeier, Eyke and Hammer, Barbara}},
  booktitle    = {{Advances in Neural Information Processing Systems (NeurIPS)}},
  pages        = {{11515--11551}},
  title        = {{{SHAP-IQ: Unified Approximation of any-order Shapley Interactions}}},
  volume       = {{36}},
  year         = {{2023}},
}

@article{61301,
  author       = {{Artelt, André and Visser, Roelof and Hammer, Barbara}},
  issn         = {{0925-2312}},
  journal      = {{Neurocomputing}},
  publisher    = {{Elsevier BV}},
  title        = {{{“I do not know! but why?” — Local model-agnostic example-based explanations of reject}}},
  doi          = {{10.1016/j.neucom.2023.126722}},
  volume       = {{558}},
  year         = {{2023}},
}

@inproceedings{33696,
  author       = {{Wiechmann, Jana and Glarner, Thomas and Rautenberg, Frederik and Wagner, Petra and Haeb-Umbach, Reinhold}},
  booktitle    = {{18. Phonetik und Phonologie im deutschsprachigen Raum (P\&P)}},
  location     = {{Bielefeld}},
  title        = {{{Technically enabled explaining of voice characteristics}}},
  year         = {{2022}},
}

@article{51348,
  abstract     = {{With the perspective on applications of AI-technology, especially data intensive deep learning approaches, the need for methods to control and understand such models has been recognized and gave rise to a new research domain labeled explainable artificial intelligence (XAI). In this overview paper we give an interim appraisal of what has been achieved so far and where there are still gaps in the research. We take an interdisciplinary perspective to identify challenges on XAI research and point to open questions with respect to the quality of the explanations regarding faithfulness and consistency of explanations. On the other hand we see a need regarding the interaction between XAI and user to allow for adaptability to specific information needs and explanatory dialog for informed decision making as well as the possibility to correct models and explanations by interaction. This endeavor requires an integrated interdisciplinary perspective and rigorous approaches to empirical evaluation based on psychological, linguistic and even sociological theories.}},
  author       = {{Schmid, Ute and Wrede, Britta}},
  issn         = {{0933-1875}},
  journal      = {{KI - Künstliche Intelligenz}},
  keywords     = {{Artificial Intelligence}},
  number       = {{3-4}},
  pages        = {{303--315}},
  publisher    = {{Springer Science and Business Media LLC}},
  title        = {{{What is Missing in XAI So Far?}}},
  doi          = {{10.1007/s13218-022-00786-2}},
  volume       = {{36}},
  year         = {{2022}},
}

@article{51366,
  author       = {{Schmid, Ute and Wrede, Britta}},
  issn         = {{0933-1875}},
  journal      = {{KI - Künstliche Intelligenz}},
  keywords     = {{Artificial Intelligence}},
  number       = {{3-4}},
  pages        = {{207--210}},
  publisher    = {{Springer Science and Business Media LLC}},
  title        = {{{Explainable AI}}},
  doi          = {{10.1007/s13218-022-00788-0}},
  volume       = {{36}},
  year         = {{2022}},
}

@article{51365,
  author       = {{Wrede, Britta}},
  issn         = {{0933-1875}},
  journal      = {{KI - Künstliche Intelligenz}},
  keywords     = {{Artificial Intelligence}},
  number       = {{2}},
  pages        = {{117--120}},
  publisher    = {{Springer Science and Business Media LLC}},
  title        = {{{AI: Back to the Roots?}}},
  doi          = {{10.1007/s13218-022-00773-7}},
  volume       = {{36}},
  year         = {{2022}},
}

@article{51344,
  abstract     = {{Modified action demonstration—dubbed \emph{motionese}—has been proposed as a way to help children recognize the structure and meaning of actions. However, until now, it has been investigated only in young infants. This brief research report presents findings from a cross-sectional study of parental action demonstrations to three groups of 8–11, 12–23, and 24–30-month-old children that applied seven motionese parameters; a second study investigated the youngest group of participants longitudinally to corroborate the cross-sectional results. Results of both studies suggested that four motionese parameters (Motion Pauses, Pace, Velocity, Acceleration) seem to structure the action by organizing it in motion pauses. Whereas these parameters persist over different ages, three other parameters (Demonstration Length, Roundness, and Range) occur predominantly in the younger group and seem to serve to organize infants' attention on the basis of movement. Results are discussed in terms of facilitative vs. pedagogical learning.}},
  author       = {{Rohlfing, Katharina and Vollmer, Anna-Lisa and Fritsch, Jannik and Wrede, Britta}},
  issn         = {{2297-900X}},
  journal      = {{Frontiers in Communication}},
  keywords     = {{Social Sciences (miscellaneous), Communication}},
  publisher    = {{Frontiers Media SA}},
  title        = {{{Which “motionese” parameters change with children's age? Disentangling attention-getting from action-structuring modifications}}},
  doi          = {{10.3389/fcomm.2022.922405}},
  volume       = {{7}},
  year         = {{2022}},
}

@inproceedings{51346,
  author       = {{Groß, André and Schütze, Christian and Wrede, Britta and Richter, Birte}},
  booktitle    = {{International Conference on Multimodal Interaction}},
  pages        = {{154--159}},
  publisher    = {{ACM}},
  title        = {{{An Architecture Supporting Configurable Autonomous Multimodal Joint-Attention-Therapy for Various Robotic Systems}}},
  doi          = {{10.1145/3536220.3558070}},
  year         = {{2022}},
}

@inproceedings{51347,
  author       = {{Schütze, Christian and Groß, André and Wrede, Britta and Richter, Birte}},
  booktitle    = {{International Conference on Multimodal Interaction}},
  pages        = {{166--170}},
  publisher    = {{ACM}},
  title        = {{{Enabling Non-Technical Domain Experts to Create Robot-Assisted Therapeutic Scenarios via Visual Programming}}},
  doi          = {{10.1145/3536220.3558072}},
  year         = {{2022}},
}

@inproceedings{55337,
  abstract     = {{As AI is more and more pervasive in everyday life, humans have an increasing demand to understand its behavior and decisions. Most research on explainable AI builds on the premise that there is one ideal explanation to be found. In fact, however, everyday explanations are co-constructed in a dialogue between the person explaining (the explainer) and the specific person being explained to (the explainee). In this paper, we introduce a first corpus of dialogical explanations to enable NLP research on how humans explain as well as on how AI can learn to imitate this process. The corpus consists of 65 transcribed English dialogues from the Wired video series 5 Levels, explaining 13 topics to five explainees of different proficiency. All 1550 dialogue turns have been manually labeled by five independent professionals for the topic discussed as well as for the dialogue act and the explanation move performed. We analyze linguistic patterns of explainers and explainees, and we explore differences across proficiency levels. BERT-based baseline results indicate that sequence information helps predicting topics, acts, and moves effectively.}},
  author       = {{Wachsmuth, Henning and Alshomary, Milad}},
  booktitle    = {{Proceedings of the 29th International Conference on Computational Linguistics}},
  editor       = {{Calzolari, Nicoletta and Huang, Chu-Ren and Kim, Hansaem and Pustejovsky, James and Wanner, Leo and Choi, Key-Sun and Ryu, Pum-Mo and Chen, Hsin-Hsi and Donatelli, Lucia and Ji, Heng and Kurohashi, Sadao and Paggio, Patrizia and Xue, Nianwen and Kim, Seokhwan and Hahm, Younggyun and He, Zhong and Lee, Tony Kyungil and Santus, Enrico and Bond, Francis and Na, Seung-Hoon}},
  pages        = {{344--354}},
  publisher    = {{International Committee on Computational Linguistics}},
  title        = {{{“Mama Always Had a Way of Explaining Things So I Could Understand”: A Dialogue Corpus for Learning to Construct Explanations}}},
  year         = {{2022}},
}

@inproceedings{34067,
  author       = {{Sengupta, Meghdut and Alshomary, Milad and Wachsmuth, Henning}},
  booktitle    = {{Proceedings of the 2022 Workshop on Figurative Language Processing}},
  title        = {{{Back to the Roots: Predicting the Source Domain of Metaphors using Contrastive Learning}}},
  year         = {{2022}},
}

@inproceedings{46289,
  author       = {{Banh, Ngoc Chi and Scharlau, Ingrid and Rohlfing, Katharina J.}},
  booktitle    = {{52. Kongress der Deutschen Gesellschaft für Psychologie}},
  editor       = {{Bermeitinger, Christina and Greve, Werner}},
  location     = {{Hildesheim, Germany}},
  title        = {{{Folgen wiederholter Negation auf die Aufmerksamkeit}}},
  year         = {{2022}},
}

@inproceedings{51343,
  abstract     = {{This paper presents preliminary work on the formalization of three prominent cognitive biases in the diagnostic reasoning process over epileptic seizures, psychogenic seizures and syncopes. Diagnostic reasoning is understood as iterative exploration of medical evidence. This exploration is represented as a partially observable Markov decision process where the state (i.e., the correct diagnosis) is uncertain. Observation likelihoods and belief updates are computed using a Bayesian network which defines the interrelation between medical risk factors, diagnoses and potential findings. The decision problem is solved via partially observable upper confidence bounds for trees in Monte-Carlo planning. We compute a biased diagnostic exploration policy by altering the generated state transition, observation and reward during look ahead simulations. The resulting diagnostic policies reproduce reasoning errors which have only been described informally in the medical literature. We plan to use this formal representation in the future to inversely detect and classify biased reasoning in actual diagnostic trajectories obtained from physicians.}},
  author       = {{Battefeld, Dominik and Kopp, Stefan}},
  booktitle    = {{Proceedings of the 8th Workshop on Formal and Cognitive Reasoning}},
  keywords     = {{Diagnostic reasoning, Cognitive bias, Cognitive model, POMDP, Bayesian network, Epilepsy, CDSS}},
  location     = {{Trier}},
  title        = {{{Formalizing cognitive biases in medical diagnostic reasoning}}},
  year         = {{2022}},
}

@article{48780,
  abstract     = {{Explainable Artificial Intelligence (XAI) has mainly focused on static learning tasks so far. In this paper, we consider XAI in the context of online learning in dynamic environments, such as learning from real-time data streams, where models are learned incrementally and continuously adapted over the course of time. More specifically, we motivate the problem of explaining model change, i.e. explaining the difference between models before and after adaptation, instead of the models themselves. In this regard, we provide the first efficient model-agnostic approach to dynamically detecting, quantifying, and explaining significant model changes. Our approach is based on an adaptation of the well-known Permutation Feature Importance (PFI) measure. It includes two hyperparameters that control the sensitivity and directly influence explanation frequency, so that a human user can adjust the method to individual requirements and application needs. We assess and validate our method’s efficacy on illustrative synthetic data streams with three popular model classes.}},
  author       = {{Muschalik, Maximilian and Fumagalli, Fabian and Hammer, Barbara and Hüllermeier, Eyke}},
  issn         = {{0933-1875}},
  journal      = {{KI - Künstliche Intelligenz}},
  keywords     = {{Artificial Intelligence}},
  number       = {{3-4}},
  pages        = {{211--224}},
  publisher    = {{Springer Science and Business Media LLC}},
  title        = {{{Agnostic Explanation of Model Change based on Feature Importance}}},
  doi          = {{10.1007/s13218-022-00766-6}},
  volume       = {{36}},
  year         = {{2022}},
}

@inproceedings{32247,
  author       = {{Alshomary, Milad and Rieskamp, Jonas and Wachsmuth, Henning}},
  booktitle    = {{Proceedings of the 9th International Conference on Computational Models of Argument}},
  pages        = {{21--31}},
  title        = {{{Generating Contrastive Snippets for Argument Search}}},
  doi          = {{10.3233/FAIA220138}},
  year         = {{2022}},
}

