[{"status":"public","type":"journal_article","article_number":"101419","article_type":"original","file_date_updated":"2025-12-01T21:02:20Z","_id":"61156","project":[{"_id":"111","name":"TRR 318; TP A01: Adaptives Erklären"},{"name":"TRR 318; TP A02: Verstehensprozess einer Erklärung beobachten und auswerten","_id":"112"},{"name":"TRR 318 - Subproject A3","_id":"113"},{"name":"TRR 318; TP A04: Integration des technischen Modells in das Partnermodell bei der Erklärung von digitalen Artefakten","_id":"114"},{"name":"TRR 318; TP A05: Echtzeitmessung der Aufmerksamkeit im Mensch-Roboter-Erklärdialog","_id":"115"},{"_id":"122","name":"TRR 318 - Subproject B3"},{"name":"TRR 318 - Subproject B5","_id":"123"},{"_id":"119","name":"TRR 318 - Project Area Ö"}],"department":[{"_id":"660"}],"user_id":"57578","intvolume":"        94","citation":{"bibtex":"@article{Buschmeier_Buhl_Kern_Grimminger_Beierling_Fisher_Groß_Horwath_Klowait_Lazarov_et al._2025, title={Forms of Understanding for XAI-Explanations}, volume={94}, DOI={<a href=\"https://doi.org/10.1016/j.cogsys.2025.101419\">10.1016/j.cogsys.2025.101419</a>}, number={101419}, journal={Cognitive Systems Research}, author={Buschmeier, Hendrik and Buhl, Heike M. and Kern, Friederike and Grimminger, Angela and Beierling, Helen and Fisher, Josephine Beryl and Groß, André and Horwath, Ilona and Klowait, Nils and Lazarov, Stefan Teodorov and et al.}, year={2025} }","mla":"Buschmeier, Hendrik, et al. “Forms of Understanding for XAI-Explanations.” <i>Cognitive Systems Research</i>, vol. 94, 101419, 2025, doi:<a href=\"https://doi.org/10.1016/j.cogsys.2025.101419\">10.1016/j.cogsys.2025.101419</a>.","short":"H. Buschmeier, H.M. Buhl, F. Kern, A. Grimminger, H. Beierling, J.B. Fisher, A. Groß, I. Horwath, N. Klowait, S.T. Lazarov, M. Lenke, V. Lohmer, K. Rohlfing, I. Scharlau, A. Singh, L. Terfloth, A.-L. Vollmer, Y. Wang, A. Wilmes, B. Wrede, Cognitive Systems Research 94 (2025).","apa":"Buschmeier, H., Buhl, H. M., Kern, F., Grimminger, A., Beierling, H., Fisher, J. B., Groß, A., Horwath, I., Klowait, N., Lazarov, S. T., Lenke, M., Lohmer, V., Rohlfing, K., Scharlau, I., Singh, A., Terfloth, L., Vollmer, A.-L., Wang, Y., Wilmes, A., &#38; Wrede, B. (2025). Forms of Understanding for XAI-Explanations. <i>Cognitive Systems Research</i>, <i>94</i>, Article 101419. <a href=\"https://doi.org/10.1016/j.cogsys.2025.101419\">https://doi.org/10.1016/j.cogsys.2025.101419</a>","ieee":"H. Buschmeier <i>et al.</i>, “Forms of Understanding for XAI-Explanations,” <i>Cognitive Systems Research</i>, vol. 94, Art. no. 101419, 2025, doi: <a href=\"https://doi.org/10.1016/j.cogsys.2025.101419\">10.1016/j.cogsys.2025.101419</a>.","chicago":"Buschmeier, Hendrik, Heike M. Buhl, Friederike Kern, Angela Grimminger, Helen Beierling, Josephine Beryl Fisher, André Groß, et al. “Forms of Understanding for XAI-Explanations.” <i>Cognitive Systems Research</i> 94 (2025). <a href=\"https://doi.org/10.1016/j.cogsys.2025.101419\">https://doi.org/10.1016/j.cogsys.2025.101419</a>.","ama":"Buschmeier H, Buhl HM, Kern F, et al. Forms of Understanding for XAI-Explanations. <i>Cognitive Systems Research</i>. 2025;94. doi:<a href=\"https://doi.org/10.1016/j.cogsys.2025.101419\">10.1016/j.cogsys.2025.101419</a>"},"has_accepted_license":"1","publication_status":"published","doi":"10.1016/j.cogsys.2025.101419","main_file_link":[{"open_access":"1","url":"https://www.sciencedirect.com/science/article/pii/S1389041725000993?via%3Dihub"}],"oa":"1","date_updated":"2025-12-05T15:32:25Z","volume":94,"author":[{"first_name":"Hendrik","full_name":"Buschmeier, Hendrik","id":"76456","last_name":"Buschmeier","orcid":"0000-0002-9613-5713"},{"first_name":"Heike M.","full_name":"Buhl, Heike M.","id":"27152","last_name":"Buhl"},{"full_name":"Kern, Friederike","last_name":"Kern","first_name":"Friederike"},{"first_name":"Angela","last_name":"Grimminger","id":"57578","full_name":"Grimminger, Angela"},{"full_name":"Beierling, Helen","id":"50995","last_name":"Beierling","first_name":"Helen"},{"orcid":"0000-0002-9997-9241","last_name":"Fisher","id":"56345","full_name":"Fisher, Josephine Beryl","first_name":"Josephine Beryl"},{"first_name":"André","orcid":"0000-0002-9593-7220","last_name":"Groß","full_name":"Groß, André","id":"93405"},{"full_name":"Horwath, Ilona","id":"68836","last_name":"Horwath","first_name":"Ilona"},{"first_name":"Nils","id":"98454","full_name":"Klowait, Nils","last_name":"Klowait","orcid":"0000-0002-7347-099X"},{"full_name":"Lazarov, Stefan Teodorov","id":"90345","orcid":"0009-0009-0892-9483","last_name":"Lazarov","first_name":"Stefan Teodorov"},{"first_name":"Michael","full_name":"Lenke, Michael","last_name":"Lenke"},{"first_name":"Vivien","last_name":"Lohmer","full_name":"Lohmer, Vivien"},{"orcid":"0000-0002-5676-8233","last_name":"Rohlfing","full_name":"Rohlfing, Katharina","id":"50352","first_name":"Katharina"},{"orcid":"0000-0003-2364-9489","last_name":"Scharlau","id":"451","full_name":"Scharlau, Ingrid","first_name":"Ingrid"},{"first_name":"Amit","orcid":"0000-0002-7789-1521","last_name":"Singh","id":"91018","full_name":"Singh, Amit"},{"first_name":"Lutz","id":"37320","full_name":"Terfloth, Lutz","last_name":"Terfloth"},{"first_name":"Anna-Lisa","last_name":"Vollmer","full_name":"Vollmer, Anna-Lisa","id":"86589"},{"last_name":"Wang","full_name":"Wang, Yu","first_name":"Yu"},{"first_name":"Annedore","full_name":"Wilmes, Annedore","last_name":"Wilmes"},{"first_name":"Britta","full_name":"Wrede, Britta","last_name":"Wrede"}],"abstract":[{"text":"Explainability has become an important topic in computer science and artificial intelligence, leading to a subfield called Explainable Artificial Intelligence (XAI). The goal of providing or seeking explanations is to achieve (better) ‘understanding’ on the part of the explainee. However, what it means to ‘understand’ is still not clearly defined, and the concept itself is rarely the subject of scientific investigation. This conceptual article aims to present a model of forms of understanding for XAI-explanations and beyond. From an interdisciplinary perspective bringing together computer science, linguistics, sociology, philosophy and psychology, a definition of understanding and its forms, assessment, and dynamics during the process of giving everyday explanations are explored. Two types of understanding are considered as possible outcomes of explanations, namely enabledness, ‘knowing how’ to do or decide something, and comprehension, ‘knowing that’ – both in different degrees (from shallow to deep). Explanations regularly start with shallow understanding in a specific domain and can lead to deep comprehension and enabledness of the explanandum, which we see as a prerequisite for human users to gain agency. In this process, the increase of comprehension and enabledness are highly interdependent. Against the background of this systematization, special challenges of understanding in XAI are discussed.","lang":"eng"}],"file":[{"success":1,"relation":"main_file","content_type":"application/pdf","file_size":10114981,"access_level":"closed","file_id":"62730","file_name":"Buschmeier-etal-2025-COGSYS.pdf","date_updated":"2025-12-01T21:02:20Z","creator":"hbuschme","date_created":"2025-12-01T21:02:20Z"}],"publication":"Cognitive Systems Research","keyword":["understanding","explaining","explanations","explainable","AI","interdisciplinarity","comprehension","enabledness","agency"],"ddc":["006"],"language":[{"iso":"eng"}],"year":"2025","quality_controlled":"1","title":"Forms of Understanding for XAI-Explanations","date_created":"2025-09-08T14:24:32Z"},{"citation":{"bibtex":"@inproceedings{Shivarpatna Venkatesh_Wang_Li_Bodden_2023, title={Enhancing Comprehension and Navigation in Jupyter Notebooks with Static Analysis}, DOI={<a href=\"https://doi.org/10.48550/ARXIV.2301.04419\">10.48550/ARXIV.2301.04419</a>}, publisher={IEEE SANER 2023 (International Conference on Software Analysis, Evolution and Reengineering)}, author={Shivarpatna Venkatesh, Ashwin Prasad and Wang, Jiawei and Li, Li and Bodden, Eric}, year={2023} }","mla":"Shivarpatna Venkatesh, Ashwin Prasad, et al. <i>Enhancing Comprehension and Navigation in Jupyter Notebooks with Static Analysis</i>. IEEE SANER 2023 (International Conference on Software Analysis, Evolution and Reengineering), 2023, doi:<a href=\"https://doi.org/10.48550/ARXIV.2301.04419\">10.48550/ARXIV.2301.04419</a>.","short":"A.P. Shivarpatna Venkatesh, J. Wang, L. Li, E. Bodden, in: IEEE SANER 2023 (International Conference on Software Analysis, Evolution and Reengineering), 2023.","apa":"Shivarpatna Venkatesh, A. P., Wang, J., Li, L., &#38; Bodden, E. (2023). <i>Enhancing Comprehension and Navigation in Jupyter Notebooks with Static Analysis</i>. IEEE SANER 2023 (International Conference on Software Analysis, Evolution and Reengineering). <a href=\"https://doi.org/10.48550/ARXIV.2301.04419\">https://doi.org/10.48550/ARXIV.2301.04419</a>","ieee":"A. P. Shivarpatna Venkatesh, J. Wang, L. Li, and E. Bodden, “Enhancing Comprehension and Navigation in Jupyter Notebooks with Static Analysis,” presented at the IEEE SANER 2023 (International Conference on Software Analysis, Evolution and Reengineering), 2023, doi: <a href=\"https://doi.org/10.48550/ARXIV.2301.04419\">10.48550/ARXIV.2301.04419</a>.","chicago":"Shivarpatna Venkatesh, Ashwin Prasad, Jiawei Wang, Li Li, and Eric Bodden. “Enhancing Comprehension and Navigation in Jupyter Notebooks with Static Analysis.” IEEE SANER 2023 (International Conference on Software Analysis, Evolution and Reengineering), 2023. <a href=\"https://doi.org/10.48550/ARXIV.2301.04419\">https://doi.org/10.48550/ARXIV.2301.04419</a>.","ama":"Shivarpatna Venkatesh AP, Wang J, Li L, Bodden E. Enhancing Comprehension and Navigation in Jupyter Notebooks with Static Analysis. In: IEEE SANER 2023 (International Conference on Software Analysis, Evolution and Reengineering); 2023. doi:<a href=\"https://doi.org/10.48550/ARXIV.2301.04419\">10.48550/ARXIV.2301.04419</a>"},"year":"2023","has_accepted_license":"1","conference":{"name":"IEEE SANER 2023 (International Conference on Software Analysis, Evolution and Reengineering)"},"doi":"10.48550/ARXIV.2301.04419","title":"Enhancing Comprehension and Navigation in Jupyter Notebooks with Static Analysis","date_created":"2023-01-13T08:03:26Z","author":[{"id":"66637","full_name":"Shivarpatna Venkatesh, Ashwin Prasad","last_name":"Shivarpatna Venkatesh","first_name":"Ashwin Prasad"},{"first_name":"Jiawei","last_name":"Wang","full_name":"Wang, Jiawei"},{"first_name":"Li","last_name":"Li","full_name":"Li, Li"},{"last_name":"Bodden","orcid":"0000-0003-3470-3647","full_name":"Bodden, Eric","id":"59256","first_name":"Eric"}],"publisher":"IEEE SANER 2023 (International Conference on Software Analysis, Evolution and Reengineering)","date_updated":"2025-04-07T10:18:03Z","oa":"1","status":"public","file":[{"content_type":"application/pdf","relation":"main_file","date_updated":"2023-01-26T10:48:40Z","date_created":"2023-01-26T10:48:40Z","creator":"ashwin","file_size":1862440,"access_level":"open_access","file_id":"40304","file_name":"2301.04419.pdf"}],"abstract":[{"lang":"eng","text":"Jupyter notebooks enable developers to interleave code snippets with rich-text and in-line visualizations. Data scientists use Jupyter notebook as the de-facto standard for creating and sharing machine-learning based solutions, primarily written in Python. Recent studies have demonstrated, however, that a large portion of Jupyter notebooks available on public platforms are undocumented and lacks a narrative structure. This reduces the readability of these notebooks. To address this shortcoming, this paper presents HeaderGen, a novel tool-based approach that automatically annotates code cells with categorical markdown headers based on a taxonomy of machine-learning operations, and classifies and displays function calls according to this taxonomy. For this functionality to be realized, HeaderGen enhances an existing call graph analysis in PyCG. To improve precision, HeaderGen extends PyCG's analysis with support for handling external library code and flow-sensitivity. The former is realized by facilitating the resolution of function return-types. Furthermore, HeaderGen uses type information to perform pattern matching on code syntax to annotate code cells.\r\nThe evaluation on 15 real-world Jupyter notebooks from Kaggle shows that HeaderGen's underlying call graph analysis yields high accuracy (96.4% precision and 95.9% recall). This is because HeaderGen can resolve return-types of external libraries where existing type inference tools such as pytype (by Google), pyright (by Microsoft), and Jedi fall short. The header generation has a precision of 82.2% and a recall rate of 96.8% with regard to headers created manually by experts. In a user study, HeaderGen helps participants finish comprehension and navigation tasks faster. All participants clearly perceive HeaderGen as useful to their task."}],"type":"conference","language":[{"iso":"eng"}],"file_date_updated":"2023-01-26T10:48:40Z","keyword":["static analysis","python","code comprehension","annotation","literate programming","jupyter notebook"],"ddc":["000"],"user_id":"15249","_id":"36522"}]
