@article{61156,
  abstract     = {{Explainability has become an important topic in computer science and artificial intelligence, leading to a subfield called Explainable Artificial Intelligence (XAI). The goal of providing or seeking explanations is to achieve (better) ‘understanding’ on the part of the explainee. However, what it means to ‘understand’ is still not clearly defined, and the concept itself is rarely the subject of scientific investigation. This conceptual article aims to present a model of forms of understanding for XAI-explanations and beyond. From an interdisciplinary perspective bringing together computer science, linguistics, sociology, philosophy and psychology, a definition of understanding and its forms, assessment, and dynamics during the process of giving everyday explanations are explored. Two types of understanding are considered as possible outcomes of explanations, namely enabledness, ‘knowing how’ to do or decide something, and comprehension, ‘knowing that’ – both in different degrees (from shallow to deep). Explanations regularly start with shallow understanding in a specific domain and can lead to deep comprehension and enabledness of the explanandum, which we see as a prerequisite for human users to gain agency. In this process, the increase of comprehension and enabledness are highly interdependent. Against the background of this systematization, special challenges of understanding in XAI are discussed.}},
  author       = {{Buschmeier, Hendrik and Buhl, Heike M. and Kern, Friederike and Grimminger, Angela and Beierling, Helen and Fisher, Josephine Beryl and Groß, André and Horwath, Ilona and Klowait, Nils and Lazarov, Stefan Teodorov and Lenke, Michael and Lohmer, Vivien and Rohlfing, Katharina and Scharlau, Ingrid and Singh, Amit and Terfloth, Lutz and Vollmer, Anna-Lisa and Wang, Yu and Wilmes, Annedore and Wrede, Britta}},
  journal      = {{Cognitive Systems Research}},
  keywords     = {{understanding, explaining, explanations, explainable, AI, interdisciplinarity, comprehension, enabledness, agency}},
  title        = {{{Forms of Understanding for XAI-Explanations}}},
  doi          = {{10.1016/j.cogsys.2025.101419}},
  volume       = {{94}},
  year         = {{2025}},
}

@article{56190,
  abstract     = {{This study investigates the potential of using advanced conversational artificial intelligence (AI) to help people understand complex AI systems. In line with conversation-analytic research, we view the participatory role of AI as dynamically unfolding in a situation rather than being predetermined by its architecture. To study user sensemaking of intransparent AI systems, we set up a naturalistic encounter between human participants and two AI systems developed in-house: a reinforcement learning simulation and a GPT-4-based explainer chatbot. Our results reveal that an explainer-AI only truly functions as such when participants actively engage with it as a co-constructive agent. Both the interface’s spatial configuration and the asynchronous temporal nature of the explainer AI – combined with the users’ presuppositions about its role – contribute to the decision whether to treat the AI as a dialogical co-participant in the interaction. Participants establish evidentiality conventions and sensemaking procedures that may diverge from a system’s intended design or function.}},
  author       = {{Klowait, Nils and Erofeeva, Maria and Lenke, Michael and Horwath, Ilona and Buschmeier, Hendrik}},
  journal      = {{Discourse \& Communication}},
  number       = {{6}},
  pages        = {{917--930}},
  publisher    = {{Sage}},
  title        = {{{Can AI explain AI? Interactive co-construction of explanations among human and artificial agents}}},
  doi          = {{10.1177/17504813241267069}},
  volume       = {{18}},
  year         = {{2024}},
}

@inbook{48643,
  author       = {{Akbulut Irmak, Emine Fulya and Hanses, Hendrik and Horwath, Ilona and Tröster, Thomas}},
  booktitle    = {{Climate Protection, Resource Efficiency, and Sustainable Engineering}},
  isbn         = {{9783837663778}},
  issn         = {{2703-1543}},
  publisher    = {{transcript Verlag}},
  title        = {{{Case Study III: Challenges of lightweight design, vehicles, and rescuers}}},
  doi          = {{10.14361/9783839463772-006}},
  year         = {{2023}},
}

@inbook{48642,
  author       = {{Akbulut Irmak, Emine Fulya and Hanses, Hendrik and Horwath, Ilona and Tröster, Thomas}},
  booktitle    = {{Climate Protection, Resource Efficiency, and Sustainable Engineering}},
  editor       = {{Horwath, Ilona and Schweizer, Swetlana}},
  isbn         = {{9783837663778}},
  issn         = {{2703-1543}},
  publisher    = {{transcript Verlag}},
  title        = {{{Case Study III: Challenges of lightweight design, vehicles, and rescuers}}},
  doi          = {{10.14361/9783839463772-006}},
  year         = {{2023}},
}

@inbook{34165,
  author       = {{Hanses, Hendrik and Akbulut Irmak, Emine Fulya and Horwath, Ilona and Tröster, Thomas}},
  booktitle    = {{Climate Protection, Resource Efficiency, and Sustainable Engineering. Transdisciplinary Approaches to Design and Manufacturing Technology}},
  editor       = {{Horwath, Ilona and Schweizer, Swetlana}},
  isbn         = {{978-3-8376-6377-8}},
  publisher    = {{transcript Verlag}},
  title        = {{{Challenges of lightweight design, vehicles, and rescuers}}},
  year         = {{2023}},
}

@inproceedings{51752,
  author       = {{Finke, Josefine and Horwath, Ilona and Matzner, Tobias and Schulz, Christian}},
  booktitle    = {{Artificial Intelligence in HCI}},
  pages        = {{149--160}},
  publisher    = {{Springer International Publishing}},
  title        = {{{(De)Coding social practice in the field of XAI: Towards a co-constructive framework of explanations and understanding between lay users and algorithmic systems}}},
  doi          = {{10.1007/978-3-031-05643-7_10}},
  year         = {{2022}},
}

@inproceedings{39639,
  author       = {{Finke, Josefine and Horwath, Ilona and Matzner, Tobias and Schulz, Christian}},
  booktitle    = {{Artificial Intelligence in HCI}},
  pages        = {{149--160}},
  publisher    = {{Springer International Publishing}},
  title        = {{{(De)Coding Social Practice in the Field of XAI: Towards a Co-constructive Framework of Explanations and Understanding Between Lay Users and Algorithmic Systems}}},
  doi          = {{10.1007/978-3-031-05643-7_10}},
  year         = {{2022}},
}

@inbook{39638,
  author       = {{Horwath, Ilona}},
  booktitle    = {{Inter- und multidisziplinäre Perspektiven der Geschlechterforschung}},
  editor       = {{Schnegg, Kordula and Tschuggnall, Julia and Voithofer, Caroline and Auer, Manfred}},
  pages        = {{71--101}},
  publisher    = {{innsbruck university press}},
  title        = {{{Algorithmen, KI und soziale Diskriminierung}}},
  volume       = {{4}},
  year         = {{2022}},
}

@inbook{39637,
  author       = {{Horwath, Ilona and Kastein, Mara and Finke, Josefine}},
  booktitle    = {{Care-Arbeit und Gender in der digitalen Transformation}},
  editor       = {{Kastein, Mara and Weber, Lena}},
  pages        = {{119--134}},
  publisher    = {{Juventa Verlag}},
  title        = {{{Waschen, Warten, Wege ebnen. Ambivalente Fürsorge und der männliche Heldenmythos in der Feuerwehr}}},
  year         = {{2022}},
}

@inproceedings{33803,
  author       = {{Hanses, Hendrik and Horwath, Ilona}},
  booktitle    = {{Conference proceedings 38th Danubia Adria Symposium on Advances in Experimental Mechanics}},
  editor       = {{Kourkoulis, Stavros K.}},
  isbn         = {{978-618-86278-0-2}},
  location     = {{Poros}},
  title        = {{{OPERATIONAL AND DEMAND-ORIENTED FIREFIGHTING EQUIPMENT}}},
  year         = {{2022}},
}

@inproceedings{32871,
  author       = {{Triebus, Marcel and Ostermann, Moritz and Tröster, Thomas and Horwath, Ilona}},
  booktitle    = {{Materials in Car Body Engineering - Bad Nauheim}},
  location     = {{Bad Nauheim}},
  title        = {{{Advanced Automotive Components by Fiber-Metal-Laminates}}},
  year         = {{2022}},
}

@article{24456,
  abstract     = {{One objective of current research in explainable intelligent systems is to implement social aspects in order to increase the relevance of explanations. In this paper, we argue that a novel conceptual framework is needed to overcome shortcomings of existing AI systems with little attention to processes of interaction and learning. Drawing from research in interaction and development, we first outline the novel conceptual framework that pushes the design of AI systems toward true interactivity with an emphasis on the role of the partner and social relevance. We propose that AI systems will be able to provide a meaningful and relevant explanation only if the process of explaining is extended to active contribution of both partners that brings about dynamics that is modulated by different levels of analysis. Accordingly, our conceptual framework comprises monitoring and scaffolding as key concepts and claims that the process of explaining is not only modulated by the interaction between explainee and explainer but is embedded into a larger social context in which conventionalized and routinized behaviors are established. We discuss our conceptual framework in relation to the established objectives of transparency and autonomy that are raised for the design of explainable AI systems currently.}},
  author       = {{Rohlfing, Katharina J. and Cimiano, Philipp and Scharlau, Ingrid and Matzner, Tobias and Buhl, Heike M. and Buschmeier, Hendrik and Esposito, Elena and Grimminger, Angela and Hammer, Barbara and Haeb-Umbach, Reinhold and Horwath, Ilona and Hüllermeier, Eyke and Kern, Friederike and Kopp, Stefan and Thommes, Kirsten and Ngonga Ngomo, Axel-Cyrille and Schulte, Carsten and Wachsmuth, Henning and Wagner, Petra and Wrede, Britta}},
  issn         = {{2379-8920}},
  journal      = {{IEEE Transactions on Cognitive and Developmental Systems}},
  keywords     = {{Explainability, process of explaining and understanding, explainable artificial systems}},
  number       = {{3}},
  pages        = {{717--728}},
  title        = {{{Explanation as a Social Practice: Toward a Conceptual Framework for the Social Design of AI Systems}}},
  doi          = {{10.1109/tcds.2020.3044366}},
  volume       = {{13}},
  year         = {{2021}},
}

@inproceedings{28415,
  author       = {{Hanses, Hendrik and Horwath, Ilona}},
  booktitle    = {{Book of Abstracts 37th Danubia Adria Symposium on Advances in Experimental Mechanics}},
  editor       = {{Holl, Helmut J.}},
  isbn         = {{978-3-9504997-0-4}},
  location     = {{Linz}},
  title        = {{{PROJECT FOR THE DEVELOPMENT OF OPERATIONAL AND DEMAND-ORIENTED FIREFIGHTING EQUIPMENT}}},
  year         = {{2021}},
}

@article{24901,
  abstract     = {{In child–robot interaction (cHRI) research, many studies pursue the goal to develop interactive systems that can be applied in everyday settings. For early education, increasingly, the setting of a kindergarten is targeted. However, when cHRI and research are brought into a kindergarten, a range of ethical and related procedural aspects have to be considered and dealt with. While ethical models elaborated within other human–robot interaction settings, e.g., assisted living contexts, can provide some important indicators for relevant issues, we argue that it is important to start developing a systematic approach to identify and tackle those ethical issues which rise with cHRI in kindergarten settings on a more global level and address the impact of the technology from a macroperspective beyond the effects on the individual. Based on our experience in conducting studies with children in general and pedagogical considerations on the role of the institution of kindergarten in specific, in this paper, we enfold some relevant aspects that have barely been addressed in an explicit way in current cHRI research. Four areas are analyzed and key ethical issues are identified in each area: (1) the institutional setting of a kindergarten, (2) children as a vulnerable group, (3) the caregivers’ role, and (4) pedagogical concepts. With our considerations, we aim at (i) broadening the methodology of the current studies within the area of cHRI, (ii) revalidate it based on our comprehensive empirical experience with research in kindergarten settings, both laboratory and real-world contexts, and (iii) provide a framework for the development of a more systematic approach to address the ethical issues in cHRI research within kindergarten settings.}},
  author       = {{Tolksdorf, Nils Frederik and Siebert, Scarlet and Zorn, Isabel and Horwath, Ilona and Rohlfing, Katharina J.}},
  issn         = {{1875-4791}},
  journal      = {{International Journal of Social Robotics}},
  pages        = {{129--140}},
  title        = {{{Ethical Considerations of Applying Robots in Kindergarten Settings: Towards an Approach from a Macroperspective}}},
  doi          = {{10.1007/s12369-020-00622-3}},
  year         = {{2021}},
}

@inproceedings{16052,
  author       = {{Horwath, Ilona and Dohmeier-Fischer, Silvia and Weiß-Borkowski, Nathalie and Tröster, Thomas}},
  booktitle    = {{INTED2018 Proceedings}},
  isbn         = {{9788469794807}},
  location     = {{Valencia}},
  title        = {{{FROM EMPOWERMENT TO INNOVATION: INTER- AND TRANSDISCIPLINARY RESEARCH METHODS IN LIGHTWEIGHT ENGINEERING}}},
  doi          = {{10.21125/inted.2018.1651}},
  year         = {{2018}},
}

@inproceedings{16054,
  author       = {{Weiß-Borkowski, Nathalie and Horwath, Ilona and Berscheid, A. and Dohmeier-Fischer, Silvia and Tröster, Thomas}},
  booktitle    = {{INTED2018 Proceedings}},
  isbn         = {{9788469794807}},
  title        = {{{NEW APPROACHES IN LIGHTWEIGHT DESIGN: V-MODEL OF LIGHTWEIGHT DESIGN BY COMPOSITES AS AN APPROACH OF INTER- AND TRANSDISCIPLINARY RESEARCH}}},
  doi          = {{10.21125/inted.2018.1298}},
  year         = {{2018}},
}

