[{"user_id":"103339","_id":"61220","project":[{"_id":"122","name":"TRR 318 - Subproject B3"}],"language":[{"iso":"eng"}],"publication":"Social Explainable AI: Communications of NII Shonan Meetings","type":"book_chapter","status":"public","abstract":[{"lang":"eng","text":"This chapter presents recurring structures of interactions—and their associated goals—as they occur in explaining processes. It explores how explanations are not delivered in isolation but unfold through dynamic, structured sequences of interaction between participants. Beginning with the smallest units, we examine how individual dialog acts and multimodal signals form micro-patterns within turns. These, in turn, compose meso-level structures such as pragmatic frames, that organize sequences of interaction into meaningful, goal-oriented episodes. At the macro-level, we identify common types of explanatory dialogues, such as inquiry, information-seeking, or deliberation, which are shaped by participants’ goals and situational demands. The chapter highlights how these abstract patterns of structure are instantiated differently across social and situational contexts and proposes that understanding them is crucial for designing socially intelligent and adaptive XAI systems. 
By analyzing how these structures emerge and function, we offer a framework for operationalizing explanation structures in a way that supports co-constructive and context-sensitive human-AI interaction."}],"editor":[{"full_name":"Rohlfing, Katharina","last_name":"Rohlfing","first_name":"Katharina"},{"first_name":"Kary","full_name":"Främling, Kary","last_name":"Främling"},{"first_name":"Brian","full_name":"Lim, Brian","last_name":"Lim"},{"first_name":"Suzana","last_name":"Alpsancar","full_name":"Alpsancar, Suzana"},{"first_name":"Kirsten","last_name":"Thommes","full_name":"Thommes, Kirsten"}],"author":[{"first_name":"Patricia","last_name":"Jimenez","id":"103339","full_name":"Jimenez, Patricia"},{"full_name":"Vollmer, Anna Lisa","last_name":"Vollmer","first_name":"Anna Lisa"},{"first_name":"Henning ","full_name":"Wachsmuth, Henning ","last_name":"Wachsmuth"}],"date_created":"2025-09-11T13:54:27Z","date_updated":"2025-09-12T11:43:07Z","publisher":"Springer Singapore","title":"Structures Underlying Explanations","publication_identifier":{"eisbn":["978-981-96-5290-7"]},"publication_status":"inpress","citation":{"ieee":"P. Jimenez, A. L. Vollmer, and H. Wachsmuth, “Structures Underlying Explanations,” in <i>Social Explainable AI: Communications of NII Shonan Meetings</i>, K. Rohlfing, K. Främling, B. Lim, S. Alpsancar, and K. Thommes, Eds. Springer Singapore.","chicago":"Jimenez, Patricia, Anna Lisa Vollmer, and Henning  Wachsmuth. “Structures Underlying Explanations.” In <i>Social Explainable AI: Communications of NII Shonan Meetings</i>, edited by Katharina Rohlfing, Kary Främling, Brian Lim, Suzana Alpsancar, and Kirsten Thommes. Springer Singapore, n.d.","ama":"Jimenez P, Vollmer AL, Wachsmuth H. Structures Underlying Explanations. In: Rohlfing K, Främling K, Lim B, Alpsancar S, Thommes K, eds. <i>Social Explainable AI: Communications of NII Shonan Meetings</i>. Springer Singapore.","apa":"Jimenez, P., Vollmer, A. L., &#38; Wachsmuth, H. (n.d.). 
Structures Underlying Explanations. In K. Rohlfing, K. Främling, B. Lim, S. Alpsancar, &#38; K. Thommes (Eds.), <i>Social Explainable AI: Communications of NII Shonan Meetings</i>. Springer Singapore.","short":"P. Jimenez, A.L. Vollmer, H. Wachsmuth, in: K. Rohlfing, K. Främling, B. Lim, S. Alpsancar, K. Thommes (Eds.), Social Explainable AI: Communications of NII Shonan Meetings, Springer Singapore, n.d.","bibtex":"@inbook{Jimenez_Vollmer_Wachsmuth, title={Structures Underlying Explanations}, booktitle={Social Explainable AI: Communications of NII Shonan Meetings}, publisher={Springer Singapore}, author={Jimenez, Patricia and Vollmer, Anna Lisa and Wachsmuth, Henning }, editor={Rohlfing, Katharina and Främling, Kary and Lim, Brian and Alpsancar, Suzana and Thommes, Kirsten} }","mla":"Jimenez, Patricia, et al. “Structures Underlying Explanations.” <i>Social Explainable AI: Communications of NII Shonan Meetings</i>, edited by Katharina Rohlfing et al., Springer Singapore."},"year":"2026"},{"year":"2025","citation":{"mla":"Fichtel, Leandra, et al. “Investigating Co-Constructive Behavior of Large Language Models in  Explanation Dialogues.” <i>ArXiv:2504.18483</i>, 2025.","short":"L. Fichtel, M. Spliethöver, E. Hüllermeier, P. Jimenez, N. Klowait, S. Kopp, A.-C. Ngonga Ngomo, A. Robrecht, I. Scharlau, L. Terfloth, A.-L. Vollmer, H. 
Wachsmuth, ArXiv:2504.18483 (2025).","bibtex":"@article{Fichtel_Spliethöver_Hüllermeier_Jimenez_Klowait_Kopp_Ngonga Ngomo_Robrecht_Scharlau_Terfloth_et al._2025, title={Investigating Co-Constructive Behavior of Large Language Models in  Explanation Dialogues}, journal={arXiv:2504.18483}, author={Fichtel, Leandra and Spliethöver, Maximilian and Hüllermeier, Eyke and Jimenez, Patricia and Klowait, Nils and Kopp, Stefan and Ngonga Ngomo, Axel-Cyrille and Robrecht, Amelie and Scharlau, Ingrid and Terfloth, Lutz and et al.}, year={2025} }","apa":"Fichtel, L., Spliethöver, M., Hüllermeier, E., Jimenez, P., Klowait, N., Kopp, S., Ngonga Ngomo, A.-C., Robrecht, A., Scharlau, I., Terfloth, L., Vollmer, A.-L., &#38; Wachsmuth, H. (2025). Investigating Co-Constructive Behavior of Large Language Models in  Explanation Dialogues. In <i>arXiv:2504.18483</i>.","ama":"Fichtel L, Spliethöver M, Hüllermeier E, et al. Investigating Co-Constructive Behavior of Large Language Models in  Explanation Dialogues. <i>arXiv:250418483</i>. Published online 2025.","chicago":"Fichtel, Leandra, Maximilian Spliethöver, Eyke Hüllermeier, Patricia Jimenez, Nils Klowait, Stefan Kopp, Axel-Cyrille Ngonga Ngomo, et al. “Investigating Co-Constructive Behavior of Large Language Models in  Explanation Dialogues.” <i>ArXiv:2504.18483</i>, 2025.","ieee":"L. Fichtel <i>et al.</i>, “Investigating Co-Constructive Behavior of Large Language Models in  Explanation Dialogues,” <i>arXiv:2504.18483</i>. 
2025."},"page":"20","has_accepted_license":"1","title":"Investigating Co-Constructive Behavior of Large Language Models in  Explanation Dialogues","main_file_link":[{"url":"https://arxiv.org/pdf/2504.18483"}],"date_updated":"2025-07-23T11:23:32Z","author":[{"full_name":"Fichtel, Leandra","last_name":"Fichtel","first_name":"Leandra"},{"last_name":"Spliethöver","full_name":"Spliethöver, Maximilian","first_name":"Maximilian"},{"last_name":"Hüllermeier","full_name":"Hüllermeier, Eyke","first_name":"Eyke"},{"first_name":"Patricia","last_name":"Jimenez","full_name":"Jimenez, Patricia","id":"103339"},{"first_name":"Nils","id":"98454","full_name":"Klowait, Nils","orcid":"0000-0002-7347-099X","last_name":"Klowait"},{"last_name":"Kopp","full_name":"Kopp, Stefan","first_name":"Stefan"},{"full_name":"Ngonga Ngomo, Axel-Cyrille","id":"65716","last_name":"Ngonga Ngomo","first_name":"Axel-Cyrille"},{"first_name":"Amelie","last_name":"Robrecht","full_name":"Robrecht, Amelie"},{"first_name":"Ingrid","orcid":"0000-0003-2364-9489","last_name":"Scharlau","full_name":"Scharlau, Ingrid","id":"451"},{"first_name":"Lutz","last_name":"Terfloth","full_name":"Terfloth, Lutz","id":"37320"},{"first_name":"Anna-Lisa","full_name":"Vollmer, Anna-Lisa","last_name":"Vollmer"},{"last_name":"Wachsmuth","full_name":"Wachsmuth, Henning","first_name":"Henning"}],"date_created":"2025-07-22T13:10:42Z","abstract":[{"text":"The ability to generate explanations that are understood by explainees is the\r\nquintessence of explainable artificial intelligence. Since understanding\r\ndepends on the explainee's background and needs, recent research focused on\r\nco-constructive explanation dialogues, where an explainer continuously monitors\r\nthe explainee's understanding and adapts their explanations dynamically. We\r\ninvestigate the ability of large language models (LLMs) to engage as explainers\r\nin co-constructive explanation dialogues. 
In particular, we present a user\r\nstudy in which explainees interact with an LLM in two settings, one of which\r\ninvolves the LLM being instructed to explain a topic co-constructively. We\r\nevaluate the explainees' understanding before and after the dialogue, as well\r\nas their perception of the LLMs' co-constructive behavior. Our results suggest\r\nthat LLMs show some co-constructive behaviors, such as asking verification\r\nquestions, that foster the explainees' engagement and can improve understanding\r\nof a topic. However, their ability to effectively monitor the current\r\nunderstanding and scaffold the explanations accordingly remains limited.","lang":"eng"}],"status":"public","type":"preprint","publication":"arXiv:2504.18483","language":[{"iso":"eng"}],"project":[{"grant_number":"438445824","name":"TRR 318 - B01: TRR 318 - Ein dialogbasierter Ansatz zur Erklärung von Modellen des maschinellen Lernens (Teilprojekt B01)","_id":"121"},{"_id":"127","name":"TRR 318 - C4: TRR 318 - Subproject C4 - Metaphern als Werkzeug des Erklärens"},{"name":"TRR 318 - B3: TRR 318 - Subproject B3","_id":"122"},{"_id":"119","name":"TRR 318 - Ö: TRR 318 - Project Area Ö"},{"grant_number":"438445824","name":"TRR 318 - A04: TRR 318 - Integration des technischen Modells in das Partnermodell bei der Erklärung von digitalen Artefakten (Teilprojekt A04)","_id":"114"}],"external_id":{"arxiv":["2504.18483"]},"_id":"60718","user_id":"98454"},{"main_file_link":[{"url":"https://arxiv.org/abs/2504.18483","open_access":"1"}],"conference":{"name":"Annual Meeting of the Special Interest Group on Discourse and Dialogue"},"date_updated":"2025-09-12T09:50:48Z","oa":"1","author":[{"first_name":"Leandra","last_name":"Fichtel","full_name":"Fichtel, Leandra"},{"first_name":"Maximilian","orcid":"0000-0003-4364-1409","last_name":"Spliethöver","full_name":"Spliethöver, Maximilian","id":"84035"},{"full_name":"Hüllermeier, 
Eyke","id":"48129","last_name":"Hüllermeier","first_name":"Eyke"},{"first_name":"Patricia","last_name":"Jimenez","id":"103339","full_name":"Jimenez, Patricia"},{"last_name":"Klowait","orcid":"0000-0002-7347-099X","id":"98454","full_name":"Klowait, Nils","first_name":"Nils"},{"first_name":"Stefan","full_name":"Kopp, Stefan","last_name":"Kopp"},{"full_name":"Ngonga Ngomo, Axel-Cyrille","id":"65716","last_name":"Ngonga Ngomo","first_name":"Axel-Cyrille"},{"first_name":"Amelie","orcid":"0000-0001-5622-8248","last_name":"Robrecht","id":"91982","full_name":"Robrecht, Amelie"},{"id":"451","full_name":"Scharlau, Ingrid","orcid":"0000-0003-2364-9489","last_name":"Scharlau","first_name":"Ingrid"},{"last_name":"Terfloth","full_name":"Terfloth, Lutz","id":"37320","first_name":"Lutz"},{"full_name":"Vollmer, Anna-Lisa","id":"86589","last_name":"Vollmer","first_name":"Anna-Lisa"},{"full_name":"Wachsmuth, Henning","id":"3900","last_name":"Wachsmuth","first_name":"Henning"}],"place":"Avignon, France","citation":{"bibtex":"@inproceedings{Fichtel_Spliethöver_Hüllermeier_Jimenez_Klowait_Kopp_Ngonga Ngomo_Robrecht_Scharlau_Terfloth_et al., place={Avignon, France}, title={Investigating Co-Constructive Behavior of Large Language Models in  Explanation Dialogues}, booktitle={Proceedings of the 26th Annual Meeting of the Special Interest Group on Discourse and Dialogue}, publisher={Association for Computational Linguistics}, author={Fichtel, Leandra and Spliethöver, Maximilian and Hüllermeier, Eyke and Jimenez, Patricia and Klowait, Nils and Kopp, Stefan and Ngonga Ngomo, Axel-Cyrille and Robrecht, Amelie and Scharlau, Ingrid and Terfloth, Lutz and et al.} }","short":"L. Fichtel, M. Spliethöver, E. Hüllermeier, P. Jimenez, N. Klowait, S. Kopp, A.-C. Ngonga Ngomo, A. Robrecht, I. Scharlau, L. Terfloth, A.-L. Vollmer, H. 
Wachsmuth, in: Proceedings of the 26th Annual Meeting of the Special Interest Group on Discourse and Dialogue, Association for Computational Linguistics, Avignon, France, n.d.","mla":"Fichtel, Leandra, et al. “Investigating Co-Constructive Behavior of Large Language Models in  Explanation Dialogues.” <i>Proceedings of the 26th Annual Meeting of the Special Interest Group on Discourse and Dialogue</i>, Association for Computational Linguistics.","apa":"Fichtel, L., Spliethöver, M., Hüllermeier, E., Jimenez, P., Klowait, N., Kopp, S., Ngonga Ngomo, A.-C., Robrecht, A., Scharlau, I., Terfloth, L., Vollmer, A.-L., &#38; Wachsmuth, H. (n.d.). Investigating Co-Constructive Behavior of Large Language Models in  Explanation Dialogues. <i>Proceedings of the 26th Annual Meeting of the Special Interest Group on Discourse and Dialogue</i>. Annual Meeting of the Special Interest Group on Discourse and Dialogue.","ama":"Fichtel L, Spliethöver M, Hüllermeier E, et al. Investigating Co-Constructive Behavior of Large Language Models in  Explanation Dialogues. In: <i>Proceedings of the 26th Annual Meeting of the Special Interest Group on Discourse and Dialogue</i>. Association for Computational Linguistics.","ieee":"L. Fichtel <i>et al.</i>, “Investigating Co-Constructive Behavior of Large Language Models in  Explanation Dialogues,” presented at the Annual Meeting of the Special Interest Group on Discourse and Dialogue.","chicago":"Fichtel, Leandra, Maximilian Spliethöver, Eyke Hüllermeier, Patricia Jimenez, Nils Klowait, Stefan Kopp, Axel-Cyrille Ngonga Ngomo, et al. “Investigating Co-Constructive Behavior of Large Language Models in  Explanation Dialogues.” In <i>Proceedings of the 26th Annual Meeting of the Special Interest Group on Discourse and Dialogue</i>. 
Avignon, France: Association for Computational Linguistics, n.d."},"publication_status":"accepted","related_material":{"link":[{"url":"https://github.com/webis-de/sigdial25-co-constructive-llms","relation":"software"},{"url":"https://github.com/webis-de/sigdial25-co-constructive-llms-data","relation":"research_data"}]},"project":[{"_id":"118","name":"TRR 318: Project Area INF"},{"name":"TRR 318; TP B01: Ein dialogbasierter Ansatz zur Erklärung von Modellen des maschinellen Lernens","_id":"121"},{"_id":"127","name":"TRR 318; TP C04: Metaphern als Werkzeug des Erklärens"},{"name":"TRR 318 - Subproject B3","_id":"122"},{"name":"TRR 318 - Project Area Ö","_id":"119"},{"name":"TRR 318; TP A04: Integration des technischen Modells in das Partnermodell bei der Erklärung von digitalen Artefakten","_id":"114"}],"_id":"61234","user_id":"84035","department":[{"_id":"660"}],"status":"public","type":"conference","title":"Investigating Co-Constructive Behavior of Large Language Models in  Explanation Dialogues","publisher":"Association for Computational Linguistics","date_created":"2025-09-11T16:11:17Z","year":"2025","language":[{"iso":"eng"}],"external_id":{"arxiv":["2504.18483"]},"abstract":[{"lang":"eng","text":"The ability to generate explanations that are understood by explainees is the\r\nquintessence of explainable artificial intelligence. Since understanding\r\ndepends on the explainee's background and needs, recent research focused on\r\nco-constructive explanation dialogues, where an explainer continuously monitors\r\nthe explainee's understanding and adapts their explanations dynamically. We\r\ninvestigate the ability of large language models (LLMs) to engage as explainers\r\nin co-constructive explanation dialogues. In particular, we present a user\r\nstudy in which explainees interact with an LLM in two settings, one of which\r\ninvolves the LLM being instructed to explain a topic co-constructively. 
We\r\nevaluate the explainees' understanding before and after the dialogue, as well\r\nas their perception of the LLMs' co-constructive behavior. Our results suggest\r\nthat LLMs show some co-constructive behaviors, such as asking verification\r\nquestions, that foster the explainees' engagement and can improve understanding\r\nof a topic. However, their ability to effectively monitor the current\r\nunderstanding and scaffold the explanations accordingly remains limited."}],"publication":"Proceedings of the 26th Annual Meeting of the Special Interest Group on Discourse and Dialogue"},{"type":"preprint","publication":"arXiv","abstract":[{"text":"Under the slogan of trustworthy AI, much of contemporary AI research is focused on designing AI systems and usage practices that inspire human trust and, thus, enhance adoption of AI systems. However, a person affected by an AI system may not be convinced by AI system design alone---neither should they, if the AI system is embedded in a social context that gives good reason to believe that it is used in tension with a person’s interest. In such cases,  distrust in the system may be justified and necessary to build meaningful trust in the first place. We propose the term \\emph{healthy distrust} to describe such a justified, careful stance towards certain AI usage practices. 
We investigate prior notions of trust and distrust in computer science, sociology, history, psychology, and philosophy, outline a remaining gap that healthy distrust might fill and conceptualize healthy distrust as a crucial part for AI usage that respects human autonomy.","lang":"eng"}],"status":"public","project":[{"_id":"122","name":"TRR 318 - B3: TRR 318 - Subproject B3"},{"name":"TRR 318 - C1: TRR 318 - Subproject C1 - Gesundes Misstrauen in Erklärungen","_id":"124"},{"_id":"370","name":"TRR 318 - B06: TRR 318 - Teilprojekt B6 - Ethik und Normativität der erklärbaren KI"}],"_id":"59917","user_id":"93637","department":[{"_id":"424"},{"_id":"26"},{"_id":"756"}],"language":[{"iso":"eng"}],"year":"2025","citation":{"ama":"Paaßen B, Alpsancar S, Matzner T, Scharlau I. Healthy Distrust in AI systems. <i>arXiv</i>. Published online 2025.","chicago":"Paaßen, Benjamin, Suzana Alpsancar, Tobias Matzner, and Ingrid Scharlau. “Healthy Distrust in AI Systems.” <i>ArXiv</i>, 2025.","ieee":"B. Paaßen, S. Alpsancar, T. Matzner, and I. Scharlau, “Healthy Distrust in AI systems,” <i>arXiv</i>. 2025.","apa":"Paaßen, B., Alpsancar, S., Matzner, T., &#38; Scharlau, I. (2025). Healthy Distrust in AI systems. In <i>arXiv</i>.","bibtex":"@article{Paaßen_Alpsancar_Matzner_Scharlau_2025, title={Healthy Distrust in AI systems}, journal={arXiv}, author={Paaßen, Benjamin and Alpsancar, Suzana and Matzner, Tobias and Scharlau, Ingrid}, year={2025} }","short":"B. Paaßen, S. Alpsancar, T. Matzner, I. Scharlau, ArXiv (2025).","mla":"Paaßen, Benjamin, et al. 
“Healthy Distrust in AI Systems.” <i>ArXiv</i>, 2025."},"oa":"1","date_updated":"2025-11-18T09:38:01Z","author":[{"full_name":"Paaßen, Benjamin","last_name":"Paaßen","first_name":"Benjamin"},{"first_name":"Suzana","last_name":"Alpsancar","full_name":"Alpsancar, Suzana","id":"93637"},{"full_name":"Matzner, Tobias","id":"65695","last_name":"Matzner","first_name":"Tobias"},{"first_name":"Ingrid","orcid":"0000-0003-2364-9489","last_name":"Scharlau","full_name":"Scharlau, Ingrid","id":"451"}],"date_created":"2025-05-16T09:39:13Z","title":"Healthy Distrust in AI systems","main_file_link":[{"url":"https://arxiv.org/abs/2505.09747","open_access":"1"}]},{"status":"public","type":"journal_article","article_type":"original","article_number":"101419","file_date_updated":"2025-12-01T21:02:20Z","project":[{"name":"TRR 318; TP A01: Adaptives Erklären","_id":"111"},{"_id":"112","name":"TRR 318; TP A02: Verstehensprozess einer Erklärung beobachten und auswerten"},{"_id":"113","name":"TRR 318 - Subproject A3"},{"name":"TRR 318; TP A04: Integration des technischen Modells in das Partnermodell bei der Erklärung von digitalen Artefakten","_id":"114"},{"name":"TRR 318; TP A05: Echtzeitmessung der Aufmerksamkeit im Mensch-Roboter-Erklärdialog","_id":"115"},{"name":"TRR 318 - Subproject B3","_id":"122"},{"_id":"123","name":"TRR 318 - Subproject B5"},{"_id":"119","name":"TRR 318 - Project Area Ö"}],"_id":"61156","user_id":"57578","department":[{"_id":"660"}],"citation":{"apa":"Buschmeier, H., Buhl, H. M., Kern, F., Grimminger, A., Beierling, H., Fisher, J. B., Groß, A., Horwath, I., Klowait, N., Lazarov, S. T., Lenke, M., Lohmer, V., Rohlfing, K., Scharlau, I., Singh, A., Terfloth, L., Vollmer, A.-L., Wang, Y., Wilmes, A., &#38; Wrede, B. (2025). Forms of Understanding for XAI-Explanations. <i>Cognitive Systems Research</i>, <i>94</i>, Article 101419. 
<a href=\"https://doi.org/10.1016/j.cogsys.2025.101419\">https://doi.org/10.1016/j.cogsys.2025.101419</a>","bibtex":"@article{Buschmeier_Buhl_Kern_Grimminger_Beierling_Fisher_Groß_Horwath_Klowait_Lazarov_et al._2025, title={Forms of Understanding for XAI-Explanations}, volume={94}, DOI={<a href=\"https://doi.org/10.1016/j.cogsys.2025.101419\">10.1016/j.cogsys.2025.101419</a>}, number={101419}, journal={Cognitive Systems Research}, author={Buschmeier, Hendrik and Buhl, Heike M. and Kern, Friederike and Grimminger, Angela and Beierling, Helen and Fisher, Josephine Beryl and Groß, André and Horwath, Ilona and Klowait, Nils and Lazarov, Stefan Teodorov and et al.}, year={2025} }","mla":"Buschmeier, Hendrik, et al. “Forms of Understanding for XAI-Explanations.” <i>Cognitive Systems Research</i>, vol. 94, 101419, 2025, doi:<a href=\"https://doi.org/10.1016/j.cogsys.2025.101419\">10.1016/j.cogsys.2025.101419</a>.","short":"H. Buschmeier, H.M. Buhl, F. Kern, A. Grimminger, H. Beierling, J.B. Fisher, A. Groß, I. Horwath, N. Klowait, S.T. Lazarov, M. Lenke, V. Lohmer, K. Rohlfing, I. Scharlau, A. Singh, L. Terfloth, A.-L. Vollmer, Y. Wang, A. Wilmes, B. Wrede, Cognitive Systems Research 94 (2025).","ama":"Buschmeier H, Buhl HM, Kern F, et al. Forms of Understanding for XAI-Explanations. <i>Cognitive Systems Research</i>. 2025;94. doi:<a href=\"https://doi.org/10.1016/j.cogsys.2025.101419\">10.1016/j.cogsys.2025.101419</a>","ieee":"H. Buschmeier <i>et al.</i>, “Forms of Understanding for XAI-Explanations,” <i>Cognitive Systems Research</i>, vol. 94, Art. no. 101419, 2025, doi: <a href=\"https://doi.org/10.1016/j.cogsys.2025.101419\">10.1016/j.cogsys.2025.101419</a>.","chicago":"Buschmeier, Hendrik, Heike M. Buhl, Friederike Kern, Angela Grimminger, Helen Beierling, Josephine Beryl Fisher, André Groß, et al. “Forms of Understanding for XAI-Explanations.” <i>Cognitive Systems Research</i> 94 (2025). 
<a href=\"https://doi.org/10.1016/j.cogsys.2025.101419\">https://doi.org/10.1016/j.cogsys.2025.101419</a>."},"intvolume":"        94","publication_status":"published","has_accepted_license":"1","main_file_link":[{"open_access":"1","url":"https://www.sciencedirect.com/science/article/pii/S1389041725000993?via%3Dihub"}],"doi":"10.1016/j.cogsys.2025.101419","date_updated":"2025-12-05T15:32:25Z","oa":"1","author":[{"orcid":"0000-0002-9613-5713","last_name":"Buschmeier","id":"76456","full_name":"Buschmeier, Hendrik","first_name":"Hendrik"},{"first_name":"Heike M.","id":"27152","full_name":"Buhl, Heike M.","last_name":"Buhl"},{"last_name":"Kern","full_name":"Kern, Friederike","first_name":"Friederike"},{"first_name":"Angela","full_name":"Grimminger, Angela","id":"57578","last_name":"Grimminger"},{"first_name":"Helen","id":"50995","full_name":"Beierling, Helen","last_name":"Beierling"},{"first_name":"Josephine Beryl","id":"56345","full_name":"Fisher, Josephine Beryl","last_name":"Fisher","orcid":"0000-0002-9997-9241"},{"last_name":"Groß","orcid":"0000-0002-9593-7220","full_name":"Groß, André","id":"93405","first_name":"André"},{"first_name":"Ilona","last_name":"Horwath","id":"68836","full_name":"Horwath, Ilona"},{"first_name":"Nils","full_name":"Klowait, Nils","id":"98454","last_name":"Klowait","orcid":"0000-0002-7347-099X"},{"first_name":"Stefan Teodorov","id":"90345","full_name":"Lazarov, Stefan Teodorov","last_name":"Lazarov","orcid":"0009-0009-0892-9483"},{"first_name":"Michael","full_name":"Lenke, Michael","last_name":"Lenke"},{"full_name":"Lohmer, Vivien","last_name":"Lohmer","first_name":"Vivien"},{"full_name":"Rohlfing, Katharina","id":"50352","orcid":"0000-0002-5676-8233","last_name":"Rohlfing","first_name":"Katharina"},{"first_name":"Ingrid","full_name":"Scharlau, Ingrid","id":"451","last_name":"Scharlau","orcid":"0000-0003-2364-9489"},{"first_name":"Amit","id":"91018","full_name":"Singh, 
Amit","last_name":"Singh","orcid":"0000-0002-7789-1521"},{"last_name":"Terfloth","full_name":"Terfloth, Lutz","id":"37320","first_name":"Lutz"},{"last_name":"Vollmer","id":"86589","full_name":"Vollmer, Anna-Lisa","first_name":"Anna-Lisa"},{"first_name":"Yu","full_name":"Wang, Yu","last_name":"Wang"},{"full_name":"Wilmes, Annedore","last_name":"Wilmes","first_name":"Annedore"},{"first_name":"Britta","full_name":"Wrede, Britta","last_name":"Wrede"}],"volume":94,"abstract":[{"text":"Explainability has become an important topic in computer science and artificial intelligence, leading to a subfield called Explainable Artificial Intelligence (XAI). The goal of providing or seeking explanations is to achieve (better) ‘understanding’ on the part of the explainee. However, what it means to ‘understand’ is still not clearly defined, and the concept itself is rarely the subject of scientific investigation. This conceptual article aims to present a model of forms of understanding for XAI-explanations and beyond. From an interdisciplinary perspective bringing together computer science, linguistics, sociology, philosophy and psychology, a definition of understanding and its forms, assessment, and dynamics during the process of giving everyday explanations are explored. Two types of understanding are considered as possible outcomes of explanations, namely enabledness, ‘knowing how’ to do or decide something, and comprehension, ‘knowing that’ – both in different degrees (from shallow to deep). Explanations regularly start with shallow understanding in a specific domain and can lead to deep comprehension and enabledness of the explanandum, which we see as a prerequisite for human users to gain agency. In this process, the increase of comprehension and enabledness are highly interdependent. 
Against the background of this systematization, special challenges of understanding in XAI are discussed.","lang":"eng"}],"file":[{"content_type":"application/pdf","relation":"main_file","success":1,"date_created":"2025-12-01T21:02:20Z","creator":"hbuschme","date_updated":"2025-12-01T21:02:20Z","file_id":"62730","access_level":"closed","file_name":"Buschmeier-etal-2025-COGSYS.pdf","file_size":10114981}],"publication":"Cognitive Systems Research","ddc":["006"],"keyword":["understanding","explaining","explanations","explainable","AI","interdisciplinarity","comprehension","enabledness","agency"],"language":[{"iso":"eng"}],"year":"2025","quality_controlled":"1","title":"Forms of Understanding for XAI-Explanations","date_created":"2025-09-08T14:24:32Z"},{"language":[{"iso":"eng"}],"keyword":["Sociology and Political Science","Communication"],"publication":"Media, Culture & Society","abstract":[{"lang":"eng","text":"<jats:p> The algorithmic imaginary as a theoretical concept has received increasing attention in recent years as it aims at users’ appropriation of algorithmic processes operating in opacity. But the concept originally only starts from the users’ point of view, while the processes on the platforms’ side are largely left out. In contrast, this paper argues that what is true for users is also valid for algorithmic processes and the designers behind. On the one hand, the algorithm imagines users’ future behavior via machine learning, which is supposed to predict all their future actions. On the other hand, the designers anticipate different actions that could potentially performed by users with every new implementation of features such as social media feeds. In order to bring into view this permanently reciprocal interplay coupled to the imaginary, in which not only the users are involved, I will argue for a more comprehensive and theoretically precise algorithmic imaginary referring to the theory of Cornelius Castoriadis. 
In such a perspective, an important contribution can be formulated for a theory of social media platforms that goes beyond praxeocentrism or structural determinism. </jats:p>"}],"date_created":"2024-02-14T09:21:17Z","publisher":"SAGE Publications","title":"A new algorithmic imaginary","issue":"3","year":"2023","department":[{"_id":"660"}],"user_id":"54779","_id":"51345","project":[{"_id":"122","name":"TRR 318 - B3: TRR 318 - Subproject B3"}],"type":"journal_article","status":"public","volume":45,"author":[{"first_name":"Christian","last_name":"Schulz","full_name":"Schulz, Christian","id":"72684"}],"date_updated":"2024-02-26T08:39:45Z","doi":"10.1177/01634437221136014","publication_identifier":{"issn":["0163-4437","1460-3675"]},"publication_status":"published","intvolume":"        45","page":"646-655","citation":{"ama":"Schulz C. A new algorithmic imaginary. <i>Media, Culture &#38; Society</i>. 2023;45(3):646-655. doi:<a href=\"https://doi.org/10.1177/01634437221136014\">10.1177/01634437221136014</a>","chicago":"Schulz, Christian. “A New Algorithmic Imaginary.” <i>Media, Culture &#38; Society</i> 45, no. 3 (2023): 646–55. <a href=\"https://doi.org/10.1177/01634437221136014\">https://doi.org/10.1177/01634437221136014</a>.","ieee":"C. Schulz, “A new algorithmic imaginary,” <i>Media, Culture &#38; Society</i>, vol. 45, no. 3, pp. 646–655, 2023, doi: <a href=\"https://doi.org/10.1177/01634437221136014\">10.1177/01634437221136014</a>.","short":"C. Schulz, Media, Culture &#38; Society 45 (2023) 646–655.","mla":"Schulz, Christian. “A New Algorithmic Imaginary.” <i>Media, Culture &#38; Society</i>, vol. 45, no. 3, SAGE Publications, 2023, pp. 
646–55, doi:<a href=\"https://doi.org/10.1177/01634437221136014\">10.1177/01634437221136014</a>.","bibtex":"@article{Schulz_2023, title={A new algorithmic imaginary}, volume={45}, DOI={<a href=\"https://doi.org/10.1177/01634437221136014\">10.1177/01634437221136014</a>}, number={3}, journal={Media, Culture &#38; Society}, publisher={SAGE Publications}, author={Schulz, Christian}, year={2023}, pages={646–655} }","apa":"Schulz, C. (2023). A new algorithmic imaginary. <i>Media, Culture &#38; Society</i>, <i>45</i>(3), 646–655. <a href=\"https://doi.org/10.1177/01634437221136014\">https://doi.org/10.1177/01634437221136014</a>"}},{"title":"Vernacular Metaphors of AI ","date_created":"2024-02-22T15:11:29Z","author":[{"first_name":"Christian","full_name":"Schulz, Christian","id":"72684","last_name":"Schulz"},{"first_name":"Annedore ","last_name":"Wilmes ","full_name":"Wilmes , Annedore "}],"date_updated":"2024-08-14T06:04:55Z","citation":{"ama":"Schulz C, Wilmes  A. Vernacular Metaphors of AI .","ieee":"C. Schulz and A. Wilmes , “Vernacular Metaphors of AI .”","chicago":"Schulz, Christian, and Annedore  Wilmes . “Vernacular Metaphors of AI .” ICA Preconference Workshop “History of Digital Metaphors”, University of Toronto, May 25 , n.d.","bibtex":"@inproceedings{Schulz_Wilmes , place={ICA Preconference Workshop “History of Digital Metaphors”, University of Toronto, May 25 }, title={Vernacular Metaphors of AI }, author={Schulz, Christian and Wilmes , Annedore } }","mla":"Schulz, Christian, and Annedore Wilmes . <i>Vernacular Metaphors of AI </i>.","short":"C. Schulz, A. Wilmes , in: ICA Preconference Workshop “History of Digital Metaphors”, University of Toronto, May 25 , n.d.","apa":"Schulz, C., &#38; Wilmes , A. (n.d.). 
<i>Vernacular Metaphors of AI </i>."},"year":"2023","place":"ICA Preconference Workshop \"History of Digital Metaphors\", University of Toronto, May 25 ","publication_status":"unpublished","language":[{"iso":"eng"}],"user_id":"72684","department":[{"_id":"660"}],"project":[{"_id":"122","name":"TRR 318 - B3: TRR 318 - Subproject B3"}],"_id":"51766","status":"public","type":"conference"},{"language":[{"iso":"eng"}],"department":[{"_id":"757"}],"user_id":"72684","series_title":"Lecture Notes in Computer Science","_id":"51752","project":[{"name":"TRR 318 - B3: TRR 318 - Subproject B3","_id":"122"}],"status":"public","publication":"Artificial Intelligence in HCI","type":"conference","doi":"10.1007/978-3-031-05643-7_10","title":"(De)Coding social practice in the field of XAI: Towards a co-constructive framework of explanations and understanding between lay users and algorithmic systems","author":[{"full_name":"Finke, Josefine","last_name":"Finke","first_name":"Josefine"},{"first_name":"Ilona","full_name":"Horwath, Ilona","id":"68836","last_name":"Horwath"},{"first_name":"Tobias","last_name":"Matzner","id":"65695","full_name":"Matzner, Tobias"},{"first_name":"Christian","last_name":"Schulz","id":"72684","full_name":"Schulz, Christian"}],"date_created":"2024-02-22T14:41:24Z","publisher":"Springer International Publishing ","date_updated":"2024-07-02T06:19:43Z","page":"149-160","citation":{"ieee":"J. Finke, I. Horwath, T. Matzner, and C. Schulz, “(De)Coding social practice in the field of XAI: Towards a co-constructive framework of explanations and understanding between lay users and algorithmic systems,” in <i>Artificial Intelligence in HCI</i>, 2022, pp. 149–160, doi: <a href=\"https://doi.org/10.1007/978-3-031-05643-7_10\">10.1007/978-3-031-05643-7_10</a>.","chicago":"Finke, Josefine, Ilona Horwath, Tobias Matzner, and Christian Schulz. 
“(De)Coding Social Practice in the Field of XAI: Towards a Co-Constructive Framework of Explanations and Understanding between Lay Users and Algorithmic Systems.” In <i>Artificial Intelligence in HCI</i>, 149–60. Lecture Notes in Computer Science. Cham: Springer International Publishing , 2022. <a href=\"https://doi.org/10.1007/978-3-031-05643-7_10\">https://doi.org/10.1007/978-3-031-05643-7_10</a>.","ama":"Finke J, Horwath I, Matzner T, Schulz C. (De)Coding social practice in the field of XAI: Towards a co-constructive framework of explanations and understanding between lay users and algorithmic systems. In: <i>Artificial Intelligence in HCI</i>. Lecture Notes in Computer Science. Springer International Publishing ; 2022:149-160. doi:<a href=\"https://doi.org/10.1007/978-3-031-05643-7_10\">10.1007/978-3-031-05643-7_10</a>","mla":"Finke, Josefine, et al. “(De)Coding Social Practice in the Field of XAI: Towards a Co-Constructive Framework of Explanations and Understanding between Lay Users and Algorithmic Systems.” <i>Artificial Intelligence in HCI</i>, Springer International Publishing , 2022, pp. 149–60, doi:<a href=\"https://doi.org/10.1007/978-3-031-05643-7_10\">10.1007/978-3-031-05643-7_10</a>.","short":"J. Finke, I. Horwath, T. Matzner, C. Schulz, in: Artificial Intelligence in HCI, Springer International Publishing , Cham, 2022, pp. 
149–160.","bibtex":"@inproceedings{Finke_Horwath_Matzner_Schulz_2022, place={Cham}, series={Lecture Notes in Computer Science}, title={(De)Coding social practice in the field of XAI: Towards a co-constructive framework of explanations and understanding between lay users and algorithmic systems}, DOI={<a href=\"https://doi.org/10.1007/978-3-031-05643-7_10\">10.1007/978-3-031-05643-7_10</a>}, booktitle={Artificial Intelligence in HCI}, publisher={Springer International Publishing }, author={Finke, Josefine and Horwath, Ilona and Matzner, Tobias and Schulz, Christian}, year={2022}, pages={149–160}, collection={Lecture Notes in Computer Science} }","apa":"Finke, J., Horwath, I., Matzner, T., &#38; Schulz, C. (2022). (De)Coding social practice in the field of XAI: Towards a co-constructive framework of explanations and understanding between lay users and algorithmic systems. <i>Artificial Intelligence in HCI</i>, 149–160. <a href=\"https://doi.org/10.1007/978-3-031-05643-7_10\">https://doi.org/10.1007/978-3-031-05643-7_10</a>"},"place":"Cham","year":"2022","publication_status":"published"},{"date_updated":"2023-05-03T08:24:22Z","publisher":"Springer International Publishing","date_created":"2023-01-24T16:09:42Z","author":[{"first_name":"Josefine","full_name":"Finke, Josefine","last_name":"Finke"},{"first_name":"Ilona","last_name":"Horwath","full_name":"Horwath, Ilona","id":"68836"},{"last_name":"Matzner","full_name":"Matzner, Tobias","id":"65695","first_name":"Tobias"},{"full_name":"Schulz, Christian","last_name":"Schulz","first_name":"Christian"}],"title":"(De)Coding Social Practice in the Field of XAI: Towards a Co-constructive Framework of Explanations and Understanding Between Lay Users and Algorithmic Systems","conference":{"name":"AI in International Conference on Human-Computer Interaction"},"doi":"10.1007/978-3-031-05643-7_10","publication_status":"published","quality_controlled":"1","year":"2022","place":"Cham","citation":{"ama":"Finke J, Horwath I, Matzner T, 
Schulz C. (De)Coding Social Practice in the Field of XAI: Towards a Co-constructive Framework of Explanations and Understanding Between Lay Users and Algorithmic Systems. In: <i>Artificial Intelligence in HCI</i>. Springer International Publishing; 2022:149-160. doi:<a href=\"https://doi.org/10.1007/978-3-031-05643-7_10\">10.1007/978-3-031-05643-7_10</a>","chicago":"Finke, Josefine, Ilona Horwath, Tobias Matzner, and Christian Schulz. “(De)Coding Social Practice in the Field of XAI: Towards a Co-Constructive Framework of Explanations and Understanding Between Lay Users and Algorithmic Systems.” In <i>Artificial Intelligence in HCI</i>, 149–60. Cham: Springer International Publishing, 2022. <a href=\"https://doi.org/10.1007/978-3-031-05643-7_10\">https://doi.org/10.1007/978-3-031-05643-7_10</a>.","ieee":"J. Finke, I. Horwath, T. Matzner, and C. Schulz, “(De)Coding Social Practice in the Field of XAI: Towards a Co-constructive Framework of Explanations and Understanding Between Lay Users and Algorithmic Systems,” in <i>Artificial Intelligence in HCI</i>, 2022, pp. 149–160, doi: <a href=\"https://doi.org/10.1007/978-3-031-05643-7_10\">10.1007/978-3-031-05643-7_10</a>.","apa":"Finke, J., Horwath, I., Matzner, T., &#38; Schulz, C. (2022). (De)Coding Social Practice in the Field of XAI: Towards a Co-constructive Framework of Explanations and Understanding Between Lay Users and Algorithmic Systems. <i>Artificial Intelligence in HCI</i>, 149–160. <a href=\"https://doi.org/10.1007/978-3-031-05643-7_10\">https://doi.org/10.1007/978-3-031-05643-7_10</a>","mla":"Finke, Josefine, et al. “(De)Coding Social Practice in the Field of XAI: Towards a Co-Constructive Framework of Explanations and Understanding Between Lay Users and Algorithmic Systems.” <i>Artificial Intelligence in HCI</i>, Springer International Publishing, 2022, pp. 
149–60, doi:<a href=\"https://doi.org/10.1007/978-3-031-05643-7_10\">10.1007/978-3-031-05643-7_10</a>.","bibtex":"@inproceedings{Finke_Horwath_Matzner_Schulz_2022, place={Cham}, title={(De)Coding Social Practice in the Field of XAI: Towards a Co-constructive Framework of Explanations and Understanding Between Lay Users and Algorithmic Systems}, DOI={<a href=\"https://doi.org/10.1007/978-3-031-05643-7_10\">10.1007/978-3-031-05643-7_10</a>}, booktitle={Artificial Intelligence in HCI}, publisher={Springer International Publishing}, author={Finke, Josefine and Horwath, Ilona and Matzner, Tobias and Schulz, Christian}, year={2022}, pages={149–160} }","short":"J. Finke, I. Horwath, T. Matzner, C. Schulz, in: Artificial Intelligence in HCI, Springer International Publishing, Cham, 2022, pp. 149–160."},"page":"149-160","project":[{"name":"TRR 318 - B3: TRR 318 - Subproject B3","_id":"122"}],"_id":"39639","user_id":"68836","department":[{"_id":"603"},{"_id":"757"}],"language":[{"iso":"eng"}],"type":"conference","publication":"Artificial Intelligence in HCI","status":"public"}]
