[{"language":[{"iso":"eng"}],"project":[{"_id":"111","name":"TRR 318; TP A01: Adaptives Erklären"},{"_id":"114","name":"TRR 318; TP A04: Integration des technischen Modells in das Partnermodell bei der Erklärung von digitalen Artefakten"},{"_id":"121","name":"TRR 318; TP B01: Ein dialogbasierter Ansatz zur Erklärung von Modellen des maschinellen Lernens"}],"_id":"65084","user_id":"90826","department":[{"_id":"427"},{"_id":"660"}],"editor":[{"full_name":"Rohlfing, Katharina J.","last_name":"Rohlfing","first_name":"Katharina J."},{"first_name":"Kary","full_name":"Främling, Kary","last_name":"Främling"},{"first_name":"Brian","last_name":"Lim","full_name":"Lim, Brian"},{"full_name":"Alpsancar, Suzana","last_name":"Alpsancar","first_name":"Suzana"},{"full_name":"Thommes, Kirsten","last_name":"Thommes","first_name":"Kirsten"}],"status":"public","type":"book_chapter","publication":"Social explainable AI","title":"Models of the situation, the explanandum, and the interaction partner","main_file_link":[{"open_access":"1","url":"https://link.springer.com/chapter/10.1007/978-981-96-5290-7_14"}],"doi":"10.1007/978-981-96-5290-7_14","publisher":"Springer","date_updated":"2026-03-23T18:24:49Z","oa":"1","date_created":"2026-03-23T08:06:57Z","author":[{"last_name":"Buhl","id":"27152","full_name":"Buhl, Heike M.","first_name":"Heike M."},{"full_name":"Vollmer, Anna-Lisa","id":"86589","last_name":"Vollmer","first_name":"Anna-Lisa"},{"first_name":"Rachid","last_name":"Alami","full_name":"Alami, Rachid"},{"full_name":"Booshehri, Meisam","id":"93424","last_name":"Booshehri","first_name":"Meisam"},{"first_name":"Kary","full_name":"Främling, Kary","last_name":"Främling"}],"year":"2026","citation":{"chicago":"Buhl, Heike M., Anna-Lisa Vollmer, Rachid Alami, Meisam Booshehri, and Kary Främling. “Models of the Situation, the Explanandum, and the Interaction Partner.” In <i>Social Explainable AI</i>, edited by Katharina J. 
Rohlfing, Kary Främling, Brian Lim, Suzana Alpsancar, and Kirsten Thommes, 269–95. Springer, 2026. <a href=\"https://doi.org/10.1007/978-981-96-5290-7_14\">https://doi.org/10.1007/978-981-96-5290-7_14</a>.","ieee":"H. M. Buhl, A.-L. Vollmer, R. Alami, M. Booshehri, and K. Främling, “Models of the situation, the explanandum, and the interaction partner,” in <i>Social explainable AI</i>, K. J. Rohlfing, K. Främling, B. Lim, S. Alpsancar, and K. Thommes, Eds. Springer, 2026, pp. 269–295.","ama":"Buhl HM, Vollmer A-L, Alami R, Booshehri M, Främling K. Models of the situation, the explanandum, and the interaction partner. In: Rohlfing KJ, Främling K, Lim B, Alpsancar S, Thommes K, eds. <i>Social Explainable AI</i>. Springer; 2026:269-295. doi:<a href=\"https://doi.org/10.1007/978-981-96-5290-7_14\">https://doi.org/10.1007/978-981-96-5290-7_14</a>","short":"H.M. Buhl, A.-L. Vollmer, R. Alami, M. Booshehri, K. Främling, in: K.J. Rohlfing, K. Främling, B. Lim, S. Alpsancar, K. Thommes (Eds.), Social Explainable AI, Springer, 2026, pp. 269–295.","bibtex":"@inbook{Buhl_Vollmer_Alami_Booshehri_Främling_2026, title={Models of the situation, the explanandum, and the interaction partner}, DOI={<a href=\"https://doi.org/10.1007/978-981-96-5290-7_14\">https://doi.org/10.1007/978-981-96-5290-7_14</a>}, booktitle={Social explainable AI}, publisher={Springer}, author={Buhl, Heike M. and Vollmer, Anna-Lisa and Alami, Rachid and Booshehri, Meisam and Främling, Kary}, editor={Rohlfing, Katharina J. and Främling, Kary and Lim, Brian and Alpsancar, Suzana and Thommes, Kirsten}, year={2026}, pages={269–295} }","mla":"Buhl, Heike M., et al. “Models of the Situation, the Explanandum, and the Interaction Partner.” <i>Social Explainable AI</i>, edited by Katharina J. Rohlfing et al., Springer, 2026, pp. 269–95, doi:<a href=\"https://doi.org/10.1007/978-981-96-5290-7_14\">https://doi.org/10.1007/978-981-96-5290-7_14</a>.","apa":"Buhl, H. 
M., Vollmer, A.-L., Alami, R., Booshehri, M., &#38; Främling, K. (2026). Models of the situation, the explanandum, and the interaction partner. In K. J. Rohlfing, K. Främling, B. Lim, S. Alpsancar, &#38; K. Thommes (Eds.), <i>Social explainable AI</i> (pp. 269–295). Springer. <a href=\"https://doi.org/10.1007/978-981-96-5290-7_14\">https://doi.org/10.1007/978-981-96-5290-7_14</a>"},"page":"269-295","related_material":{"link":[{"relation":"confirmation","url":"https://link.springer.com/chapter/10.1007/978-981-96-5290-7_14"}]}},{"department":[{"_id":"660"}],"user_id":"84035","_id":"61234","project":[{"_id":"118","name":"TRR 318: Project Area INF"},{"name":"TRR 318; TP B01: Ein dialogbasierter Ansatz zur Erklärung von Modellen des maschinellen Lernens","_id":"121"},{"name":"TRR 318; TP C04: Metaphern als Werkzeug des Erklärens","_id":"127"},{"name":"TRR 318 - Subproject B3","_id":"122"},{"_id":"119","name":"TRR 318 - Project Area Ö"},{"name":"TRR 318; TP A04: Integration des technischen Modells in das Partnermodell bei der Erklärung von digitalen Artefakten","_id":"114"}],"status":"public","type":"conference","conference":{"name":"Annual Meeting of the Special Interest Group on Discourse and Dialogue"},"main_file_link":[{"url":"https://arxiv.org/abs/2504.18483","open_access":"1"}],"author":[{"last_name":"Fichtel","full_name":"Fichtel, Leandra","first_name":"Leandra"},{"orcid":"0000-0003-4364-1409","last_name":"Spliethöver","id":"84035","full_name":"Spliethöver, Maximilian","first_name":"Maximilian"},{"last_name":"Hüllermeier","id":"48129","full_name":"Hüllermeier, Eyke","first_name":"Eyke"},{"id":"103339","full_name":"Jimenez, Patricia","last_name":"Jimenez","first_name":"Patricia"},{"orcid":"0000-0002-7347-099X","last_name":"Klowait","full_name":"Klowait, Nils","id":"98454","first_name":"Nils"},{"full_name":"Kopp, Stefan","last_name":"Kopp","first_name":"Stefan"},{"first_name":"Axel-Cyrille","full_name":"Ngonga Ngomo, Axel-Cyrille","id":"65716","last_name":"Ngonga 
Ngomo"},{"first_name":"Amelie","orcid":"0000-0001-5622-8248","last_name":"Robrecht","id":"91982","full_name":"Robrecht, Amelie"},{"first_name":"Ingrid","full_name":"Scharlau, Ingrid","id":"451","orcid":"0000-0003-2364-9489","last_name":"Scharlau"},{"first_name":"Lutz","full_name":"Terfloth, Lutz","id":"37320","last_name":"Terfloth"},{"first_name":"Anna-Lisa","id":"86589","full_name":"Vollmer, Anna-Lisa","last_name":"Vollmer"},{"first_name":"Henning","full_name":"Wachsmuth, Henning","id":"3900","last_name":"Wachsmuth"}],"oa":"1","date_updated":"2025-09-12T09:50:48Z","citation":{"bibtex":"@inproceedings{Fichtel_Spliethöver_Hüllermeier_Jimenez_Klowait_Kopp_Ngonga Ngomo_Robrecht_Scharlau_Terfloth_et al., place={Avignon, France}, title={Investigating Co-Constructive Behavior of Large Language Models in  Explanation Dialogues}, booktitle={Proceedings of the 26th Annual Meeting of the Special Interest Group on Discourse and Dialogue}, publisher={Association for Computational Linguistics}, author={Fichtel, Leandra and Spliethöver, Maximilian and Hüllermeier, Eyke and Jimenez, Patricia and Klowait, Nils and Kopp, Stefan and Ngonga Ngomo, Axel-Cyrille and Robrecht, Amelie and Scharlau, Ingrid and Terfloth, Lutz and et al.} }","short":"L. Fichtel, M. Spliethöver, E. Hüllermeier, P. Jimenez, N. Klowait, S. Kopp, A.-C. Ngonga Ngomo, A. Robrecht, I. Scharlau, L. Terfloth, A.-L. Vollmer, H. Wachsmuth, in: Proceedings of the 26th Annual Meeting of the Special Interest Group on Discourse and Dialogue, Association for Computational Linguistics, Avignon, France, n.d.","mla":"Fichtel, Leandra, et al. 
“Investigating Co-Constructive Behavior of Large Language Models in  Explanation Dialogues.” <i>Proceedings of the 26th Annual Meeting of the Special Interest Group on Discourse and Dialogue</i>, Association for Computational Linguistics.","apa":"Fichtel, L., Spliethöver, M., Hüllermeier, E., Jimenez, P., Klowait, N., Kopp, S., Ngonga Ngomo, A.-C., Robrecht, A., Scharlau, I., Terfloth, L., Vollmer, A.-L., &#38; Wachsmuth, H. (n.d.). Investigating Co-Constructive Behavior of Large Language Models in  Explanation Dialogues. <i>Proceedings of the 26th Annual Meeting of the Special Interest Group on Discourse and Dialogue</i>. Annual Meeting of the Special Interest Group on Discourse and Dialogue.","ama":"Fichtel L, Spliethöver M, Hüllermeier E, et al. Investigating Co-Constructive Behavior of Large Language Models in  Explanation Dialogues. In: <i>Proceedings of the 26th Annual Meeting of the Special Interest Group on Discourse and Dialogue</i>. Association for Computational Linguistics.","ieee":"L. Fichtel <i>et al.</i>, “Investigating Co-Constructive Behavior of Large Language Models in  Explanation Dialogues,” presented at the Annual Meeting of the Special Interest Group on Discourse and Dialogue.","chicago":"Fichtel, Leandra, Maximilian Spliethöver, Eyke Hüllermeier, Patricia Jimenez, Nils Klowait, Stefan Kopp, Axel-Cyrille Ngonga Ngomo, et al. “Investigating Co-Constructive Behavior of Large Language Models in  Explanation Dialogues.” In <i>Proceedings of the 26th Annual Meeting of the Special Interest Group on Discourse and Dialogue</i>. 
Avignon, France: Association for Computational Linguistics, n.d."},"place":"Avignon, France","related_material":{"link":[{"relation":"software","url":"https://github.com/webis-de/sigdial25-co-constructive-llms"},{"relation":"research_data","url":"https://github.com/webis-de/sigdial25-co-constructive-llms-data"}]},"publication_status":"accepted","language":[{"iso":"eng"}],"external_id":{"arxiv":["2504.18483"]},"abstract":[{"lang":"eng","text":"The ability to generate explanations that are understood by explainees is the\r\nquintessence of explainable artificial intelligence. Since understanding\r\ndepends on the explainee's background and needs, recent research focused on\r\nco-constructive explanation dialogues, where an explainer continuously monitors\r\nthe explainee's understanding and adapts their explanations dynamically. We\r\ninvestigate the ability of large language models (LLMs) to engage as explainers\r\nin co-constructive explanation dialogues. In particular, we present a user\r\nstudy in which explainees interact with an LLM in two settings, one of which\r\ninvolves the LLM being instructed to explain a topic co-constructively. We\r\nevaluate the explainees' understanding before and after the dialogue, as well\r\nas their perception of the LLMs' co-constructive behavior. Our results suggest\r\nthat LLMs show some co-constructive behaviors, such as asking verification\r\nquestions, that foster the explainees' engagement and can improve understanding\r\nof a topic. 
However, their ability to effectively monitor the current\r\nunderstanding and scaffold the explanations accordingly remains limited."}],"publication":"Proceedings of the 26th Annual Meeting of the Special Interest Group on Discourse and Dialogue","title":"Investigating Co-Constructive Behavior of Large Language Models in  Explanation Dialogues","date_created":"2025-09-11T16:11:17Z","publisher":"Association for Computational Linguistics","year":"2025"},{"publication":"Cognitive Systems Research","file":[{"access_level":"closed","file_id":"62730","file_name":"Buschmeier-etal-2025-COGSYS.pdf","file_size":10114981,"creator":"hbuschme","date_created":"2025-12-01T21:02:20Z","date_updated":"2025-12-01T21:02:20Z","relation":"main_file","success":1,"content_type":"application/pdf"}],"abstract":[{"lang":"eng","text":"Explainability has become an important topic in computer science and artificial intelligence, leading to a subfield called Explainable Artificial Intelligence (XAI). The goal of providing or seeking explanations is to achieve (better) ‘understanding’ on the part of the explainee. However, what it means to ‘understand’ is still not clearly defined, and the concept itself is rarely the subject of scientific investigation. This conceptual article aims to present a model of forms of understanding for XAI-explanations and beyond. From an interdisciplinary perspective bringing together computer science, linguistics, sociology, philosophy and psychology, a definition of understanding and its forms, assessment, and dynamics during the process of giving everyday explanations are explored. Two types of understanding are considered as possible outcomes of explanations, namely enabledness, ‘knowing how’ to do or decide something, and comprehension, ‘knowing that’ – both in different degrees (from shallow to deep). 
Explanations regularly start with shallow understanding in a specific domain and can lead to deep comprehension and enabledness of the explanandum, which we see as a prerequisite for human users to gain agency. In this process, the increase of comprehension and enabledness are highly interdependent. Against the background of this systematization, special challenges of understanding in XAI are discussed."}],"language":[{"iso":"eng"}],"keyword":["understanding","explaining","explanations","explainable","AI","interdisciplinarity","comprehension","enabledness","agency"],"ddc":["006"],"quality_controlled":"1","year":"2025","date_created":"2025-09-08T14:24:32Z","title":"Forms of Understanding for XAI-Explanations","type":"journal_article","status":"public","department":[{"_id":"660"}],"user_id":"57578","_id":"61156","project":[{"name":"TRR 318; TP A01: Adaptives Erklären","_id":"111"},{"name":"TRR 318; TP A02: Verstehensprozess einer Erklärung beobachten und auswerten","_id":"112"},{"name":"TRR 318 - Subproject A3","_id":"113"},{"name":"TRR 318; TP A04: Integration des technischen Modells in das Partnermodell bei der Erklärung von digitalen Artefakten","_id":"114"},{"_id":"115","name":"TRR 318; TP A05: Echtzeitmessung der Aufmerksamkeit im Mensch-Roboter-Erklärdialog"},{"name":"TRR 318 - Subproject B3","_id":"122"},{"_id":"123","name":"TRR 318 - Subproject B5"},{"name":"TRR 318 - Project Area Ö","_id":"119"}],"file_date_updated":"2025-12-01T21:02:20Z","article_type":"original","article_number":"101419","has_accepted_license":"1","publication_status":"published","intvolume":"        94","citation":{"apa":"Buschmeier, H., Buhl, H. M., Kern, F., Grimminger, A., Beierling, H., Fisher, J. B., Groß, A., Horwath, I., Klowait, N., Lazarov, S. T., Lenke, M., Lohmer, V., Rohlfing, K., Scharlau, I., Singh, A., Terfloth, L., Vollmer, A.-L., Wang, Y., Wilmes, A., &#38; Wrede, B. (2025). Forms of Understanding for XAI-Explanations. 
<i>Cognitive Systems Research</i>, <i>94</i>, Article 101419. <a href=\"https://doi.org/10.1016/j.cogsys.2025.101419\">https://doi.org/10.1016/j.cogsys.2025.101419</a>","mla":"Buschmeier, Hendrik, et al. “Forms of Understanding for XAI-Explanations.” <i>Cognitive Systems Research</i>, vol. 94, 101419, 2025, doi:<a href=\"https://doi.org/10.1016/j.cogsys.2025.101419\">10.1016/j.cogsys.2025.101419</a>.","bibtex":"@article{Buschmeier_Buhl_Kern_Grimminger_Beierling_Fisher_Groß_Horwath_Klowait_Lazarov_et al._2025, title={Forms of Understanding for XAI-Explanations}, volume={94}, DOI={<a href=\"https://doi.org/10.1016/j.cogsys.2025.101419\">10.1016/j.cogsys.2025.101419</a>}, number={101419}, journal={Cognitive Systems Research}, author={Buschmeier, Hendrik and Buhl, Heike M. and Kern, Friederike and Grimminger, Angela and Beierling, Helen and Fisher, Josephine Beryl and Groß, André and Horwath, Ilona and Klowait, Nils and Lazarov, Stefan Teodorov and et al.}, year={2025} }","short":"H. Buschmeier, H.M. Buhl, F. Kern, A. Grimminger, H. Beierling, J.B. Fisher, A. Groß, I. Horwath, N. Klowait, S.T. Lazarov, M. Lenke, V. Lohmer, K. Rohlfing, I. Scharlau, A. Singh, L. Terfloth, A.-L. Vollmer, Y. Wang, A. Wilmes, B. Wrede, Cognitive Systems Research 94 (2025).","ieee":"H. Buschmeier <i>et al.</i>, “Forms of Understanding for XAI-Explanations,” <i>Cognitive Systems Research</i>, vol. 94, Art. no. 101419, 2025, doi: <a href=\"https://doi.org/10.1016/j.cogsys.2025.101419\">10.1016/j.cogsys.2025.101419</a>.","chicago":"Buschmeier, Hendrik, Heike M. Buhl, Friederike Kern, Angela Grimminger, Helen Beierling, Josephine Beryl Fisher, André Groß, et al. “Forms of Understanding for XAI-Explanations.” <i>Cognitive Systems Research</i> 94 (2025). <a href=\"https://doi.org/10.1016/j.cogsys.2025.101419\">https://doi.org/10.1016/j.cogsys.2025.101419</a>.","ama":"Buschmeier H, Buhl HM, Kern F, et al. Forms of Understanding for XAI-Explanations. <i>Cognitive Systems Research</i>. 2025;94. 
doi:<a href=\"https://doi.org/10.1016/j.cogsys.2025.101419\">10.1016/j.cogsys.2025.101419</a>"},"volume":94,"author":[{"full_name":"Buschmeier, Hendrik","id":"76456","last_name":"Buschmeier","orcid":"0000-0002-9613-5713","first_name":"Hendrik"},{"full_name":"Buhl, Heike M.","id":"27152","last_name":"Buhl","first_name":"Heike M."},{"full_name":"Kern, Friederike","last_name":"Kern","first_name":"Friederike"},{"last_name":"Grimminger","full_name":"Grimminger, Angela","id":"57578","first_name":"Angela"},{"first_name":"Helen","id":"50995","full_name":"Beierling, Helen","last_name":"Beierling"},{"orcid":"0000-0002-9997-9241","last_name":"Fisher","full_name":"Fisher, Josephine Beryl","id":"56345","first_name":"Josephine Beryl"},{"last_name":"Groß","orcid":"0000-0002-9593-7220","id":"93405","full_name":"Groß, André","first_name":"André"},{"first_name":"Ilona","last_name":"Horwath","id":"68836","full_name":"Horwath, Ilona"},{"first_name":"Nils","last_name":"Klowait","orcid":"0000-0002-7347-099X","id":"98454","full_name":"Klowait, Nils"},{"last_name":"Lazarov","orcid":"0009-0009-0892-9483","id":"90345","full_name":"Lazarov, Stefan Teodorov","first_name":"Stefan Teodorov"},{"last_name":"Lenke","full_name":"Lenke, Michael","first_name":"Michael"},{"full_name":"Lohmer, Vivien","last_name":"Lohmer","first_name":"Vivien"},{"last_name":"Rohlfing","orcid":"0000-0002-5676-8233","id":"50352","full_name":"Rohlfing, Katharina","first_name":"Katharina"},{"last_name":"Scharlau","orcid":"0000-0003-2364-9489","id":"451","full_name":"Scharlau, Ingrid","first_name":"Ingrid"},{"first_name":"Amit","full_name":"Singh, Amit","id":"91018","last_name":"Singh","orcid":"0000-0002-7789-1521"},{"id":"37320","full_name":"Terfloth, Lutz","last_name":"Terfloth","first_name":"Lutz"},{"last_name":"Vollmer","id":"86589","full_name":"Vollmer, Anna-Lisa","first_name":"Anna-Lisa"},{"full_name":"Wang, Yu","last_name":"Wang","first_name":"Yu"},{"full_name":"Wilmes, 
Annedore","last_name":"Wilmes","first_name":"Annedore"},{"last_name":"Wrede","full_name":"Wrede, Britta","first_name":"Britta"}],"date_updated":"2025-12-05T15:32:25Z","oa":"1","doi":"10.1016/j.cogsys.2025.101419","main_file_link":[{"open_access":"1","url":"https://www.sciencedirect.com/science/article/pii/S1389041725000993?via%3Dihub"}]},{"user_id":"44029","department":[{"_id":"184"},{"_id":"178"},{"_id":"660"}],"project":[{"_id":"125","name":"TRR 318 - C2: TRR 318 - Subproject C2"},{"name":"TRR 318 - A3: TRR 318 - Subproject A3","_id":"113"},{"name":"TRR 318 - A02: TRR 318 - Verstehensprozess einer Erklärung beobachten und auswerten (Teilprojekt A02)","_id":"112","grant_number":"438445824"},{"name":"TRR 318 - INF: TRR 318 - Project Area INF","_id":"118"}],"_id":"55403","status":"public","type":"conference","main_file_link":[{"open_access":"1"}],"conference":{"location":"Lisbon, Portugal","name":"2024 Workshop on Explainability Engineering"},"doi":"10.1145/3648505.3648509","author":[{"first_name":"Hendrik","full_name":"Buschmeier, Hendrik","id":"76456","last_name":"Buschmeier","orcid":"0000-0002-9613-5713"},{"first_name":"Philipp","full_name":"Cimiano, Philipp","last_name":"Cimiano"},{"full_name":"Kopp, Stefan","last_name":"Kopp","first_name":"Stefan"},{"first_name":"Jaroslaw","id":"44029","full_name":"Kornowicz, Jaroslaw","last_name":"Kornowicz","orcid":"0000-0002-5654-9911"},{"full_name":"Lammert, Olesja","id":"47384","last_name":"Lammert","orcid":"0000-0001-8201-5166","first_name":"Olesja"},{"first_name":"Marco","last_name":"Matarese","full_name":"Matarese, Marco"},{"last_name":"Mindlin","full_name":"Mindlin, Dimitry","first_name":"Dimitry"},{"first_name":"Amelie Sophie","last_name":"Robrecht","full_name":"Robrecht, Amelie Sophie"},{"first_name":"Anna-Lisa","full_name":"Vollmer, Anna-Lisa","id":"86589","last_name":"Vollmer"},{"first_name":"Petra","last_name":"Wagner","id":"74505","full_name":"Wagner, Petra"},{"full_name":"Wrede, 
Britta","last_name":"Wrede","first_name":"Britta"},{"id":"93424","full_name":"Booshehri, Meisam","last_name":"Booshehri","first_name":"Meisam"}],"date_updated":"2025-03-14T19:24:35Z","oa":"1","citation":{"apa":"Buschmeier, H., Cimiano, P., Kopp, S., Kornowicz, J., Lammert, O., Matarese, M., Mindlin, D., Robrecht, A. S., Vollmer, A.-L., Wagner, P., Wrede, B., &#38; Booshehri, M. (2024). Towards a Computational Architecture for Co-Constructive Explainable Systems. <i>Proceedings of the 2024 Workshop on Explainability Engineering</i>, 20–25. <a href=\"https://doi.org/10.1145/3648505.3648509\">https://doi.org/10.1145/3648505.3648509</a>","mla":"Buschmeier, Hendrik, et al. “Towards a Computational Architecture for Co-Constructive Explainable Systems.” <i>Proceedings of the 2024 Workshop on Explainability Engineering</i>, ACM, 2024, pp. 20–25, doi:<a href=\"https://doi.org/10.1145/3648505.3648509\">10.1145/3648505.3648509</a>.","bibtex":"@inproceedings{Buschmeier_Cimiano_Kopp_Kornowicz_Lammert_Matarese_Mindlin_Robrecht_Vollmer_Wagner_et al._2024, title={Towards a Computational Architecture for Co-Constructive Explainable Systems}, DOI={<a href=\"https://doi.org/10.1145/3648505.3648509\">10.1145/3648505.3648509</a>}, booktitle={Proceedings of the 2024 Workshop on Explainability Engineering}, publisher={ACM}, author={Buschmeier, Hendrik and Cimiano, Philipp and Kopp, Stefan and Kornowicz, Jaroslaw and Lammert, Olesja and Matarese, Marco and Mindlin, Dimitry and Robrecht, Amelie Sophie and Vollmer, Anna-Lisa and Wagner, Petra and et al.}, year={2024}, pages={20–25} }","short":"H. Buschmeier, P. Cimiano, S. Kopp, J. Kornowicz, O. Lammert, M. Matarese, D. Mindlin, A.S. Robrecht, A.-L. Vollmer, P. Wagner, B. Wrede, M. Booshehri, in: Proceedings of the 2024 Workshop on Explainability Engineering, ACM, 2024, pp. 20–25.","ama":"Buschmeier H, Cimiano P, Kopp S, et al. Towards a Computational Architecture for Co-Constructive Explainable Systems. 
In: <i>Proceedings of the 2024 Workshop on Explainability Engineering</i>. ACM; 2024:20-25. doi:<a href=\"https://doi.org/10.1145/3648505.3648509\">10.1145/3648505.3648509</a>","ieee":"H. Buschmeier <i>et al.</i>, “Towards a Computational Architecture for Co-Constructive Explainable Systems,” in <i>Proceedings of the 2024 Workshop on Explainability Engineering</i>, Lisbon, Portugal, 2024, pp. 20–25, doi: <a href=\"https://doi.org/10.1145/3648505.3648509\">10.1145/3648505.3648509</a>.","chicago":"Buschmeier, Hendrik, Philipp Cimiano, Stefan Kopp, Jaroslaw Kornowicz, Olesja Lammert, Marco Matarese, Dimitry Mindlin, et al. “Towards a Computational Architecture for Co-Constructive Explainable Systems.” In <i>Proceedings of the 2024 Workshop on Explainability Engineering</i>, 20–25. ACM, 2024. <a href=\"https://doi.org/10.1145/3648505.3648509\">https://doi.org/10.1145/3648505.3648509</a>."},"page":"20-25","publication_status":"published","language":[{"iso":"eng"}],"abstract":[{"text":"In this paper we consider the interactive processes by which an explainer and an explainee cooperate to produce an explanation, which we refer to as co-construction. Explainable Artificial Intelligence (XAI) is concerned with the development of intelligent systems and robots that can explain and justify their actions, decisions, recommendations, and so on. However, the cooperative construction of explanations remains a key but under-explored issue. This short paper proposes an architecture for intelligent systems that promotes a co-constructive and interactive approach to explanation generation. 
By outlining its basic components and their specific roles, we aim to contribute to the advancement of XAI computational frameworks that actively engage users in the explanation process.","lang":"eng"}],"publication":"Proceedings of the 2024 Workshop on Explainability Engineering","title":"Towards a Computational Architecture for Co-Constructive Explainable Systems","date_created":"2024-07-26T11:57:31Z","publisher":"ACM","year":"2024","quality_controlled":"1"}]
