[{"date_updated":"2026-01-14T14:29:03Z","publisher":"Frontiers Media SA","author":[{"full_name":"Peters, Tobias Martin","id":"92810","orcid":"0009-0008-5193-6243","last_name":"Peters","first_name":"Tobias Martin"},{"first_name":"Kai","id":"55908","full_name":"Biermeier, Kai","orcid":"0000-0002-2879-2359","last_name":"Biermeier"},{"orcid":"0000-0003-2364-9489","last_name":"Scharlau","full_name":"Scharlau, Ingrid","id":"451","first_name":"Ingrid"}],"date_created":"2026-01-14T14:21:59Z","volume":16,"title":"Assessing healthy distrust in human-AI interaction: interpreting changes in visual attention","doi":"10.3389/fpsyg.2025.1694367","publication_status":"published","publication_identifier":{"issn":["1664-1078"]},"year":"2026","citation":{"ieee":"T. M. Peters, K. Biermeier, and I. Scharlau, “Assessing healthy distrust in human-AI interaction: interpreting changes in visual attention,” <i>Frontiers in Psychology</i>, vol. 16, Art. no. 1694367, 2026, doi: <a href=\"https://doi.org/10.3389/fpsyg.2025.1694367\">10.3389/fpsyg.2025.1694367</a>.","chicago":"Peters, Tobias Martin, Kai Biermeier, and Ingrid Scharlau. “Assessing Healthy Distrust in Human-AI Interaction: Interpreting Changes in Visual Attention.” <i>Frontiers in Psychology</i> 16 (2026). <a href=\"https://doi.org/10.3389/fpsyg.2025.1694367\">https://doi.org/10.3389/fpsyg.2025.1694367</a>.","ama":"Peters TM, Biermeier K, Scharlau I. Assessing healthy distrust in human-AI interaction: interpreting changes in visual attention. <i>Frontiers in Psychology</i>. 2026;16. doi:<a href=\"https://doi.org/10.3389/fpsyg.2025.1694367\">10.3389/fpsyg.2025.1694367</a>","apa":"Peters, T. M., Biermeier, K., &#38; Scharlau, I. (2026). Assessing healthy distrust in human-AI interaction: interpreting changes in visual attention. <i>Frontiers in Psychology</i>, <i>16</i>, Article 1694367. <a href=\"https://doi.org/10.3389/fpsyg.2025.1694367\">https://doi.org/10.3389/fpsyg.2025.1694367</a>","mla":"Peters, Tobias Martin, et al. “Assessing Healthy Distrust in Human-AI Interaction: Interpreting Changes in Visual Attention.” <i>Frontiers in Psychology</i>, vol. 16, 1694367, Frontiers Media SA, 2026, doi:<a href=\"https://doi.org/10.3389/fpsyg.2025.1694367\">10.3389/fpsyg.2025.1694367</a>.","short":"T.M. Peters, K. Biermeier, I. Scharlau, Frontiers in Psychology 16 (2026).","bibtex":"@article{Peters_Biermeier_Scharlau_2026, title={Assessing healthy distrust in human-AI interaction: interpreting changes in visual attention}, volume={16}, DOI={<a href=\"https://doi.org/10.3389/fpsyg.2025.1694367\">10.3389/fpsyg.2025.1694367</a>}, number={1694367}, journal={Frontiers in Psychology}, publisher={Frontiers Media SA}, author={Peters, Tobias Martin and Biermeier, Kai and Scharlau, Ingrid}, year={2026} }"},"intvolume":"        16","project":[{"name":"TRR 318 ; TP C01: Gesundes Misstrauen in Erklärungen","_id":"124"}],"_id":"63611","user_id":"92810","department":[{"_id":"424"},{"_id":"660"}],"article_type":"original","article_number":"1694367","keyword":["appropriate trust","healthy distrust","visual attention","Theory of Visual Attention","human-AI interaction","Bayesian cognitive model","image classification"],"language":[{"iso":"eng"}],"type":"journal_article","publication":"Frontiers in Psychology","abstract":[{"text":"When humans interact with artificial intelligence (AI), one desideratum is appropriate trust. 
Typically, appropriate trust encompasses that humans trust AI except for instances in which they either explicitly notice AI errors or are suspicious that errors could be present. So far, appropriate trust or related notions have mainly been investigated by assessing trust and reliance. In this contribution, we argue that these assessments are insufficient to measure the complex aim of appropriate trust and the related notion of healthy distrust. We introduce and test the perspective of covert visual attention as an additional indicator for appropriate trust and draw conceptual connections to the notion of healthy distrust. To test the validity of our conceptualization, we formalize visual attention using the Theory of Visual Attention and measure its properties that are potentially relevant to appropriate trust and healthy distrust in an image classification task. Based on temporal-order judgment performance, we estimate participants' attentional capacity and attentional weight toward correct and incorrect mock-up AI classifications. We observe that misclassifications reduce attentional capacity compared to correct classifications. However, our results do not indicate that this reduction is beneficial for a subsequent judgment of the classifications. The attentional weighting is not affected by the classifications' correctness but by the difficulty of categorizing the stimuli themselves. We discuss these results, their implications, and the limited potential for using visual attention as an indicator of appropriate trust and healthy distrust.","lang":"eng"}],"status":"public"},{"publication_status":"published","publication_identifier":{"issn":["1389-0417"]},"citation":{"apa":"Visser, R., Peters, T. M., Scharlau, I., &#38; Hammer, B. (2025). Trust, distrust, and appropriate reliance in (X)AI: A conceptual clarification of user trust and survey of its empirical evaluation. <i>Cognitive Systems Research</i>, Article 101357. <a href=\"https://doi.org/10.1016/j.cogsys.2025.101357\">https://doi.org/10.1016/j.cogsys.2025.101357</a>","short":"R. Visser, T.M. Peters, I. Scharlau, B. Hammer, Cognitive Systems Research (2025).","bibtex":"@article{Visser_Peters_Scharlau_Hammer_2025, title={Trust, distrust, and appropriate reliance in (X)AI: A conceptual clarification of user trust and survey of its empirical evaluation}, DOI={<a href=\"https://doi.org/10.1016/j.cogsys.2025.101357\">10.1016/j.cogsys.2025.101357</a>}, number={101357}, journal={Cognitive Systems Research}, publisher={Elsevier BV}, author={Visser, Roel and Peters, Tobias Martin and Scharlau, Ingrid and Hammer, Barbara}, year={2025} }","mla":"Visser, Roel, et al. “Trust, Distrust, and Appropriate Reliance in (X)AI: A Conceptual Clarification of User Trust and Survey of Its Empirical Evaluation.” <i>Cognitive Systems Research</i>, 101357, Elsevier BV, 2025, doi:<a href=\"https://doi.org/10.1016/j.cogsys.2025.101357\">10.1016/j.cogsys.2025.101357</a>.","chicago":"Visser, Roel, Tobias Martin Peters, Ingrid Scharlau, and Barbara Hammer. “Trust, Distrust, and Appropriate Reliance in (X)AI: A Conceptual Clarification of User Trust and Survey of Its Empirical Evaluation.” <i>Cognitive Systems Research</i>, 2025. <a href=\"https://doi.org/10.1016/j.cogsys.2025.101357\">https://doi.org/10.1016/j.cogsys.2025.101357</a>.","ieee":"R. Visser, T. M. Peters, I. Scharlau, and B. 
Hammer, “Trust, distrust, and appropriate reliance in (X)AI: A conceptual clarification of user trust and survey of its empirical evaluation,” <i>Cognitive Systems Research</i>, Art. no. 101357, 2025, doi: <a href=\"https://doi.org/10.1016/j.cogsys.2025.101357\">10.1016/j.cogsys.2025.101357</a>.","ama":"Visser R, Peters TM, Scharlau I, Hammer B. Trust, distrust, and appropriate reliance in (X)AI: A conceptual clarification of user trust and survey of its empirical evaluation. <i>Cognitive Systems Research</i>. Published online 2025. doi:<a href=\"https://doi.org/10.1016/j.cogsys.2025.101357\">10.1016/j.cogsys.2025.101357</a>"},"year":"2025","author":[{"full_name":"Visser, Roel","last_name":"Visser","first_name":"Roel"},{"first_name":"Tobias Martin","orcid":"0009-0008-5193-6243","last_name":"Peters","id":"92810","full_name":"Peters, Tobias Martin"},{"first_name":"Ingrid","full_name":"Scharlau, Ingrid","id":"451","orcid":"0000-0003-2364-9489","last_name":"Scharlau"},{"last_name":"Hammer","full_name":"Hammer, Barbara","first_name":"Barbara"}],"date_created":"2025-05-02T09:26:15Z","date_updated":"2025-05-15T11:16:27Z","publisher":"Elsevier BV","doi":"10.1016/j.cogsys.2025.101357","title":"Trust, distrust, and appropriate reliance in (X)AI: A conceptual clarification of user trust and survey of its empirical evaluation","type":"journal_article","publication":"Cognitive Systems Research","status":"public","abstract":[{"text":"A current concern in the field of Artificial Intelligence (AI) is to ensure the trustworthiness of AI systems. The development of explainability methods is one prominent way to address this, which has often resulted in the assumption that the use of explainability will lead to an increase in the trust of users and wider society. However, the dynamics between explainability and trust are not well established and empirical investigations of their relation remain mixed or inconclusive.\r\nIn this paper we provide a detailed description of the concepts of user trust and distrust in AI and their relation to appropriate reliance. For that we draw from the fields of machine learning, human–computer interaction, and the social sciences. Based on these insights, we have conducted a focused review of existing empirical studies that investigate the effects of AI systems and XAI methods on user (dis)trust, in order to substantiate our conceptualization of trust, distrust, and reliance. With respect to our conceptual understanding we identify gaps in existing empirical work. By clarifying the concepts and summarizing the empirical studies, we aim to provide researchers who examine user trust in AI with an improved starting point for developing user studies to measure and evaluate the user’s attitude towards and reliance on AI systems.","lang":"eng"}],"user_id":"92810","department":[{"_id":"424"},{"_id":"660"}],"project":[{"_id":"124","name":"TRR 318 - C1: TRR 318 - Subproject C1 - Gesundes Misstrauen in Erklärungen"}],"_id":"59756","language":[{"iso":"eng"}],"article_number":"101357","keyword":["XAI","Appropriate trust","Distrust","Reliance","Human-centric evaluation","Trustworthy AI"]},{"year":"2025","citation":{"chicago":"Peters, Tobias Martin, and Ingrid Scharlau. “Interacting with Fallible AI: Is Distrust Helpful When Receiving AI Misclassifications?” <i>Frontiers in Psychology</i> 16 (2025). <a href=\"https://doi.org/10.3389/fpsyg.2025.1574809\">https://doi.org/10.3389/fpsyg.2025.1574809</a>.","ieee":"T. M. Peters and I. 
Scharlau, “Interacting with fallible AI: Is distrust helpful when receiving AI misclassifications?,” <i>Frontiers in Psychology</i>, vol. 16, 2025, doi: <a href=\"https://doi.org/10.3389/fpsyg.2025.1574809\">10.3389/fpsyg.2025.1574809</a>.","ama":"Peters TM, Scharlau I. Interacting with fallible AI: Is distrust helpful when receiving AI misclassifications? <i>Frontiers in Psychology</i>. 2025;16. doi:<a href=\"https://doi.org/10.3389/fpsyg.2025.1574809\">10.3389/fpsyg.2025.1574809</a>","apa":"Peters, T. M., &#38; Scharlau, I. (2025). Interacting with fallible AI: Is distrust helpful when receiving AI misclassifications? <i>Frontiers in Psychology</i>, <i>16</i>. <a href=\"https://doi.org/10.3389/fpsyg.2025.1574809\">https://doi.org/10.3389/fpsyg.2025.1574809</a>","short":"T.M. Peters, I. Scharlau, Frontiers in Psychology 16 (2025).","bibtex":"@article{Peters_Scharlau_2025, title={Interacting with fallible AI: Is distrust helpful when receiving AI misclassifications?}, volume={16}, DOI={<a href=\"https://doi.org/10.3389/fpsyg.2025.1574809\">10.3389/fpsyg.2025.1574809</a>}, journal={Frontiers in Psychology}, author={Peters, Tobias Martin and Scharlau, Ingrid}, year={2025} }","mla":"Peters, Tobias Martin, and Ingrid Scharlau. “Interacting with Fallible AI: Is Distrust Helpful When Receiving AI Misclassifications?” <i>Frontiers in Psychology</i>, vol. 16, 2025, doi:<a href=\"https://doi.org/10.3389/fpsyg.2025.1574809\">10.3389/fpsyg.2025.1574809</a>."},"intvolume":"        16","publication_status":"published","title":"Interacting with fallible AI: Is distrust helpful when receiving AI misclassifications?","doi":"10.3389/fpsyg.2025.1574809","date_updated":"2025-05-27T09:10:09Z","date_created":"2025-05-02T09:22:39Z","author":[{"full_name":"Peters, Tobias Martin","id":"92810","orcid":"0009-0008-5193-6243","last_name":"Peters","first_name":"Tobias Martin"},{"first_name":"Ingrid","orcid":"0000-0003-2364-9489","last_name":"Scharlau","full_name":"Scharlau, Ingrid","id":"451"}],"volume":16,"abstract":[{"lang":"eng","text":"Due to the application of Artificial Intelligence (AI) in high-risk domains like law or medicine, trustworthy AI and trust in AI are of increasing scientific and public relevance. A typical conception, for example in the context of medical diagnosis, is that a knowledgeable user receives AI-generated classification as advice. Research to improve such interactions often aims to foster the user’s trust, which in turn should improve the combined human-AI performance. Given that AI models can err, we argue that the possibility to critically review, thus to distrust, an AI decision is an equally interesting target of research.\r\nWe created two image classification scenarios in which the participants received mock-up AI advice. The quality of the advice decreases for a phase of the experiment. We studied the task performance, trust and distrust of the participants, and tested whether an instruction to remain skeptical and review each piece of advice led to a better performance compared to a neutral condition. Our results indicate that this instruction does not improve but rather worsens the participants’ performance. Repeated single-item self-report of trust and distrust shows an increase in trust and a decrease in distrust after the drop in the AI’s classification quality, with no difference between the two instructions. Furthermore, via a Bayesian Signal Detection Theory analysis, we provide a procedure to assess appropriate reliance in detail, by quantifying whether the problems of under- and over-reliance have been mitigated. We discuss implications of our results for the usage of disclaimers before interacting with AI, as prominently used in current LLM-based chatbots, and for trust and distrust research."}],"status":"public","type":"journal_article","publication":"Frontiers in Psychology","article_type":"original","keyword":["trust in AI","trust","distrust","human-AI interaction","Signal Detection Theory","Bayesian parameter estimation","image classification"],"language":[{"iso":"eng"}],"project":[{"name":"TRR 318 - C1: TRR 318 - Subproject C1 - Gesundes Misstrauen in Erklärungen","_id":"124"}],"_id":"59755","user_id":"92810","department":[{"_id":"424"},{"_id":"660"}]},{"article_type":"original","alternative_title":["Hybride Entscheidungsunterstützung in der Produktentstehung - Mit Data Science und Künstlicher Intelligenz die Leistungsfähigkeit erhöhen"],"_id":"58650","department":[{"_id":"152"}],"user_id":"405","status":"public","type":"journal_article","doi":"10.30844/i4sd.25.1.18","main_file_link":[{"open_access":"1"}],"oa":"1","date_updated":"2025-02-15T09:40:52Z","volume":2025,"author":[{"first_name":"Iris","full_name":"Gräßler, Iris","id":"47565","last_name":"Gräßler","orcid":"0000-0001-5765-971X"},{"orcid":"http://orcid.org/0000-0001-8778-2989","last_name":"Pottebaum","id":"405","full_name":"Pottebaum, Jens","first_name":"Jens"},{"full_name":"Nyhuis, Peter","last_name":"Nyhuis","first_name":"Peter"},{"last_name":"Stark","full_name":"Stark, Rainer","first_name":"Rainer"},{"first_name":"Klaus-Dieter","last_name":"Thoben","full_name":"Thoben, Klaus-Dieter"},{"last_name":"Wiederkehr","full_name":"Wiederkehr, Petra","first_name":"Petra"}],"intvolume":"      2025","citation":{"ama":"Gräßler I, Pottebaum J, Nyhuis P, Stark R, Thoben K-D, Wiederkehr P. Hybrid Decision Support in Product Creation - Improving performance with data science and artificial intelligence. <i>Industry 4.0 Science</i>. 2025;2025(1). doi:<a href=\"https://doi.org/10.30844/i4sd.25.1.18\">10.30844/i4sd.25.1.18</a>","ieee":"I. Gräßler, J. Pottebaum, P. Nyhuis, R. Stark, K.-D. Thoben, and P. Wiederkehr, “Hybrid Decision Support in Product Creation - Improving performance with data science and artificial intelligence,” <i>Industry 4.0 Science</i>, vol. 2025, no. 1, 2025, doi: <a href=\"https://doi.org/10.30844/i4sd.25.1.18\">10.30844/i4sd.25.1.18</a>.","chicago":"Gräßler, Iris, Jens Pottebaum, Peter Nyhuis, Rainer Stark, Klaus-Dieter Thoben, and Petra Wiederkehr. “Hybrid Decision Support in Product Creation - Improving Performance with Data Science and Artificial Intelligence.” <i>Industry 4.0 Science</i> 2025, no. 1 (2025). <a href=\"https://doi.org/10.30844/i4sd.25.1.18\">https://doi.org/10.30844/i4sd.25.1.18</a>.","apa":"Gräßler, I., Pottebaum, J., Nyhuis, P., Stark, R., Thoben, K.-D., &#38; Wiederkehr, P. (2025). Hybrid Decision Support in Product Creation - Improving performance with data science and artificial intelligence. <i>Industry 4.0 Science</i>, <i>2025</i>(1). 
<a href=\"https://doi.org/10.30844/i4sd.25.1.18\">https://doi.org/10.30844/i4sd.25.1.18</a>","bibtex":"@article{Gräßler_Pottebaum_Nyhuis_Stark_Thoben_Wiederkehr_2025, title={Hybrid Decision Support in Product Creation - Improving performance with data science and artificial intelligence}, volume={2025}, DOI={<a href=\"https://doi.org/10.30844/i4sd.25.1.18\">10.30844/i4sd.25.1.18</a>}, number={1}, journal={Industry 4.0 Science}, publisher={GITO mbH Verlag}, author={Gräßler, Iris and Pottebaum, Jens and Nyhuis, Peter and Stark, Rainer and Thoben, Klaus-Dieter and Wiederkehr, Petra}, year={2025} }","short":"I. Gräßler, J. Pottebaum, P. Nyhuis, R. Stark, K.-D. Thoben, P. Wiederkehr, Industry 4.0 Science 2025 (2025).","mla":"Gräßler, Iris, et al. “Hybrid Decision Support in Product Creation - Improving Performance with Data Science and Artificial Intelligence.” <i>Industry 4.0 Science</i>, vol. 2025, no. 1, GITO mbH Verlag, 2025, doi:<a href=\"https://doi.org/10.30844/i4sd.25.1.18\">10.30844/i4sd.25.1.18</a>."},"publication_identifier":{"issn":["2942-6170"]},"publication_status":"published","keyword":["AI","artificial intelligence","Data Science","decision support","extreme data","Künstliche Intelligenz","product creation","product development"],"language":[{"iso":"eng"}],"abstract":[{"lang":"eng","text":"Technical systems are characterized by increasing interdisciplinarity, complexity and networking. A product and its corresponding production systems require interdisciplinary multi-objective optimization. Sustainability and recyclability demands increase said complexity. The efficiency of previously established engineering methods is reaching its limits, which can only be overcome by systematic integration of extreme data. The aim of \"hybrid decision support\" is as follows: Data science and artificial intelligence should be used to supplement human capabilities in conjunction with existing heuristics, methods, modeling and simulation to increase the efficiency of product creation."}],"publication":"Industry 4.0 Science","title":"Hybrid Decision Support in Product Creation - Improving performance with data science and artificial intelligence","publisher":"GITO mbH Verlag","date_created":"2025-02-15T09:31:30Z","year":"2025","quality_controlled":"1","issue":"1"},{"_id":"61410","department":[{"_id":"195"}],"user_id":"59677","keyword":["AI","AI consciousness","AI ethics","service systems"],"ddc":["380"],"article_type":"original","language":[{"iso":"eng"}],"file_date_updated":"2025-11-10T10:20:48Z","publication":"Journal of Service Management","type":"journal_article","abstract":[{"text":"Purpose: The purpose of this study is to identify, analyze, and explain the implications that could arise for service settings if AI systems develop, or are perceived to develop, consciousness – the ability to acknowledge their own existence and the capacity for positive or negative experiences.\r\n\r\nDesign/methodology/approach: This study proposes and explores four hypothetical scenarios in which conscious AI in service could manifest. We contextualize our resulting typology in the health service context and integrate extant literature on technology-enabled service, AI consciousness, and AI ethics into the narrative.\r\n\r\nFindings: This study provides a unique theoretical contribution to service research in the form of a Type IV theory. It enables future service researchers to apprehend, explain, and predict how functionally conscious AI in service might unfold.\r\n\r\nOriginality: An increasingly prolific public discourse acknowledges that conscious AI systems may emerge. Against this backdrop, this study aims to systematically explore a question that is perhaps the most critical and timely, but also inherently speculative, in relation to AI in service research by introducing much-needed theory and terminology.\r\n\r\nPractical implications: The ethical use of conscious AI in service could emerge as a distinct competitive advantage in the future. Achieving this outcome involves speculative yet actionable recommendations that include training, guiding, and controlling how humans engage with such systems, developing appropriate wellbeing protocols for functionally conscious AI systems, and establishing AI rights and governance frameworks.","lang":"eng"}],"status":"public","file":[{"content_type":"application/pdf","success":1,"relation":"main_file","date_updated":"2025-11-10T10:20:48Z","creator":"dabe","date_created":"2025-11-10T10:20:48Z","file_size":743479,"file_id":"62150","access_level":"closed","file_name":"Breidbach et al, 2025_Conscious AI in Service_w link.pdf"}],"publisher":"Emerald","date_updated":"2025-11-10T10:22:59Z","author":[{"full_name":"Breidbach, Christoph","last_name":"Breidbach","first_name":"Christoph"},{"first_name":"Casper Ferm","full_name":"Lars-Erik, Casper Ferm","last_name":"Lars-Erik"},{"first_name":"Paul","last_name":"Maglio","full_name":"Maglio, Paul"},{"last_name":"Beverungen","full_name":"Beverungen, Daniel","id":"59677","first_name":"Daniel"},{"first_name":"Jochen","last_name":"Wirtz","full_name":"Wirtz, Jochen"},{"first_name":"Alex","full_name":"Twigg, Alex","last_name":"Twigg"}],"date_created":"2025-09-23T11:47:47Z","title":"Conscious Artificial Intelligence in Service","has_accepted_license":"1","quality_controlled":"1","publication_status":"inpress","year":"2025","citation":{"ama":"Breidbach C, Lars-Erik CF, Maglio P, Beverungen D, Wirtz J, Twigg A. Conscious Artificial Intelligence in Service. <i>Journal of Service Management</i>.","chicago":"Breidbach, Christoph, Casper Ferm Lars-Erik, Paul Maglio, Daniel Beverungen, Jochen Wirtz, and Alex Twigg. “Conscious Artificial Intelligence in Service.” <i>Journal of Service Management</i>, n.d.","ieee":"C. Breidbach, C. F. Lars-Erik, P. Maglio, D. Beverungen, J. Wirtz, and A. Twigg, “Conscious Artificial Intelligence in Service,” <i>Journal of Service Management</i>.","apa":"Breidbach, C., Lars-Erik, C. F., Maglio, P., Beverungen, D., Wirtz, J., &#38; Twigg, A. (n.d.). Conscious Artificial Intelligence in Service. <i>Journal of Service Management</i>.","bibtex":"@article{Breidbach_Lars-Erik_Maglio_Beverungen_Wirtz_Twigg, title={Conscious Artificial Intelligence in Service}, journal={Journal of Service Management}, publisher={Emerald}, author={Breidbach, Christoph and Lars-Erik, Casper Ferm and Maglio, Paul and Beverungen, Daniel and Wirtz, Jochen and Twigg, Alex} }","short":"C. Breidbach, C.F. Lars-Erik, P. Maglio, D. Beverungen, J. Wirtz, A. Twigg, Journal of Service Management (n.d.).","mla":"Breidbach, Christoph, et al. 
“Conscious Artificial Intelligence in Service.” <i>Journal of Service Management</i>, Emerald."}},{"date_created":"2025-09-08T14:24:32Z","title":"Forms of Understanding for XAI-Explanations","quality_controlled":"1","year":"2025","language":[{"iso":"eng"}],"ddc":["006"],"keyword":["understanding","explaining","explanations","explainable","AI","interdisciplinarity","comprehension","enabledness","agency"],"publication":"Cognitive Systems Research","file":[{"date_updated":"2025-12-01T21:02:20Z","date_created":"2025-12-01T21:02:20Z","creator":"hbuschme","file_size":10114981,"access_level":"closed","file_id":"62730","file_name":"Buschmeier-etal-2025-COGSYS.pdf","content_type":"application/pdf","success":1,"relation":"main_file"}],"abstract":[{"text":"Explainability has become an important topic in computer science and artificial intelligence, leading to a subfield called Explainable Artificial Intelligence (XAI). The goal of providing or seeking explanations is to achieve (better) ‘understanding’ on the part of the explainee. However, what it means to ‘understand’ is still not clearly defined, and the concept itself is rarely the subject of scientific investigation. This conceptual article aims to present a model of forms of understanding for XAI-explanations and beyond. From an interdisciplinary perspective bringing together computer science, linguistics, sociology, philosophy and psychology, a definition of understanding and its forms, assessment, and dynamics during the process of giving everyday explanations are explored. Two types of understanding are considered as possible outcomes of explanations, namely enabledness, ‘knowing how’ to do or decide something, and comprehension, ‘knowing that’ – both in different degrees (from shallow to deep). Explanations regularly start with shallow understanding in a specific domain and can lead to deep comprehension and enabledness of the explanandum, which we see as a prerequisite for human users to gain agency. In this process, the increase of comprehension and enabledness are highly interdependent. 
Against the background of this systematization, special challenges of understanding in XAI are discussed.","lang":"eng"}],"author":[{"first_name":"Hendrik","orcid":"0000-0002-9613-5713","last_name":"Buschmeier","full_name":"Buschmeier, Hendrik","id":"76456"},{"id":"27152","full_name":"Buhl, Heike M.","last_name":"Buhl","first_name":"Heike M."},{"first_name":"Friederike","last_name":"Kern","full_name":"Kern, Friederike"},{"first_name":"Angela","full_name":"Grimminger, Angela","id":"57578","last_name":"Grimminger"},{"last_name":"Beierling","full_name":"Beierling, Helen","id":"50995","first_name":"Helen"},{"orcid":"0000-0002-9997-9241","last_name":"Fisher","full_name":"Fisher, Josephine Beryl","id":"56345","first_name":"Josephine Beryl"},{"orcid":"0000-0002-9593-7220","last_name":"Groß","id":"93405","full_name":"Groß, André","first_name":"André"},{"first_name":"Ilona","id":"68836","full_name":"Horwath, Ilona","last_name":"Horwath"},{"first_name":"Nils","full_name":"Klowait, Nils","id":"98454","orcid":"0000-0002-7347-099X","last_name":"Klowait"},{"last_name":"Lazarov","orcid":"0009-0009-0892-9483","full_name":"Lazarov, Stefan Teodorov","id":"90345","first_name":"Stefan Teodorov"},{"first_name":"Michael","last_name":"Lenke","full_name":"Lenke, Michael"},{"last_name":"Lohmer","full_name":"Lohmer, Vivien","first_name":"Vivien"},{"id":"50352","full_name":"Rohlfing, Katharina","orcid":"0000-0002-5676-8233","last_name":"Rohlfing","first_name":"Katharina"},{"full_name":"Scharlau, Ingrid","id":"451","orcid":"0000-0003-2364-9489","last_name":"Scharlau","first_name":"Ingrid"},{"first_name":"Amit","orcid":"0000-0002-7789-1521","last_name":"Singh","id":"91018","full_name":"Singh, Amit"},{"full_name":"Terfloth, Lutz","id":"37320","last_name":"Terfloth","first_name":"Lutz"},{"first_name":"Anna-Lisa","id":"86589","full_name":"Vollmer, Anna-Lisa","last_name":"Vollmer"},{"first_name":"Yu","full_name":"Wang, Yu","last_name":"Wang"},{"last_name":"Wilmes","full_name":"Wilmes, Annedore","first_name":"Annedore"},{"first_name":"Britta","last_name":"Wrede","full_name":"Wrede, Britta"}],"volume":94,"oa":"1","date_updated":"2025-12-05T15:32:25Z","main_file_link":[{"url":"https://www.sciencedirect.com/science/article/pii/S1389041725000993?via%3Dihub","open_access":"1"}],"doi":"10.1016/j.cogsys.2025.101419","publication_status":"published","has_accepted_license":"1","citation":{"ieee":"H. Buschmeier <i>et al.</i>, “Forms of Understanding for XAI-Explanations,” <i>Cognitive Systems Research</i>, vol. 94, Art. no. 101419, 2025, doi: <a href=\"https://doi.org/10.1016/j.cogsys.2025.101419\">10.1016/j.cogsys.2025.101419</a>.","chicago":"Buschmeier, Hendrik, Heike M. Buhl, Friederike Kern, Angela Grimminger, Helen Beierling, Josephine Beryl Fisher, André Groß, et al. “Forms of Understanding for XAI-Explanations.” <i>Cognitive Systems Research</i> 94 (2025). <a href=\"https://doi.org/10.1016/j.cogsys.2025.101419\">https://doi.org/10.1016/j.cogsys.2025.101419</a>.","ama":"Buschmeier H, Buhl HM, Kern F, et al. Forms of Understanding for XAI-Explanations. <i>Cognitive Systems Research</i>. 2025;94. 
doi:<a href=\"https://doi.org/10.1016/j.cogsys.2025.101419\">10.1016/j.cogsys.2025.101419</a>","bibtex":"@article{Buschmeier_Buhl_Kern_Grimminger_Beierling_Fisher_Groß_Horwath_Klowait_Lazarov_et al._2025, title={Forms of Understanding for XAI-Explanations}, volume={94}, DOI={<a href=\"https://doi.org/10.1016/j.cogsys.2025.101419\">10.1016/j.cogsys.2025.101419</a>}, number={101419}, journal={Cognitive Systems Research}, author={Buschmeier, Hendrik and Buhl, Heike M. and Kern, Friederike and Grimminger, Angela and Beierling, Helen and Fisher, Josephine Beryl and Groß, André and Horwath, Ilona and Klowait, Nils and Lazarov, Stefan Teodorov and et al.}, year={2025} }","short":"H. Buschmeier, H.M. Buhl, F. Kern, A. Grimminger, H. Beierling, J.B. Fisher, A. Groß, I. Horwath, N. Klowait, S.T. Lazarov, M. Lenke, V. Lohmer, K. Rohlfing, I. Scharlau, A. Singh, L. Terfloth, A.-L. Vollmer, Y. Wang, A. Wilmes, B. Wrede, Cognitive Systems Research 94 (2025).","mla":"Buschmeier, Hendrik, et al. “Forms of Understanding for XAI-Explanations.” <i>Cognitive Systems Research</i>, vol. 94, 101419, 2025, doi:<a href=\"https://doi.org/10.1016/j.cogsys.2025.101419\">10.1016/j.cogsys.2025.101419</a>.","apa":"Buschmeier, H., Buhl, H. M., Kern, F., Grimminger, A., Beierling, H., Fisher, J. B., Groß, A., Horwath, I., Klowait, N., Lazarov, S. T., Lenke, M., Lohmer, V., Rohlfing, K., Scharlau, I., Singh, A., Terfloth, L., Vollmer, A.-L., Wang, Y., Wilmes, A., &#38; Wrede, B. (2025). Forms of Understanding for XAI-Explanations. <i>Cognitive Systems Research</i>, <i>94</i>, Article 101419. <a href=\"https://doi.org/10.1016/j.cogsys.2025.101419\">https://doi.org/10.1016/j.cogsys.2025.101419</a>"},"intvolume":"        94","user_id":"57578","department":[{"_id":"660"}],"project":[{"_id":"111","name":"TRR 318; TP A01: Adaptives Erklären"},{"_id":"112","name":"TRR 318; TP A02: Verstehensprozess einer Erklärung beobachten und auswerten"},{"name":"TRR 318 - Subproject A3","_id":"113"},{"name":"TRR 318; TP A04: Integration des technischen Modells in das Partnermodell bei der Erklärung von digitalen Artefakten","_id":"114"},{"_id":"115","name":"TRR 318; TP A05: Echtzeitmessung der Aufmerksamkeit im Mensch-Roboter-Erklärdialog"},{"_id":"122","name":"TRR 318 - Subproject B3"},{"_id":"123","name":"TRR 318 - Subproject B5"},{"name":"TRR 318 - Project Area Ö","_id":"119"}],"_id":"61156","file_date_updated":"2025-12-01T21:02:20Z","article_type":"original","article_number":"101419","type":"journal_article","status":"public"},{"year":"2025","citation":{"ama":"Donner JAT, Schlüter A. Development of an AI-driven decentralized control for fifth generation district heating and cooling networks. In: <i>SDEWES Conference 2025</i>. ; 2025.","chicago":"Donner, Johannes Aurelius Tamino, and Alexander Schlüter. “Development of an AI-Driven Decentralized Control for Fifth Generation District Heating and Cooling Networks.” In <i>SDEWES Conference 2025</i>, 2025.","ieee":"J. A. T. Donner and A. Schlüter, “Development of an AI-driven decentralized control for fifth generation district heating and cooling networks,” presented at the 20th SDEWES Conference, Dubrovnik, 2025.","apa":"Donner, J. A. T., &#38; Schlüter, A. (2025). Development of an AI-driven decentralized control for fifth generation district heating and cooling networks. <i>SDEWES Conference 2025</i>. 20th SDEWES Conference, Dubrovnik.","mla":"Donner, Johannes Aurelius Tamino, and Alexander Schlüter. 
“Development of an AI-Driven Decentralized Control for Fifth Generation District Heating and Cooling Networks.” <i>SDEWES Conference 2025</i>, 2025.","bibtex":"@inproceedings{Donner_Schlüter_2025, title={Development of an AI-driven decentralized control for fifth generation district heating and cooling networks}, booktitle={SDEWES Conference 2025}, author={Donner, Johannes Aurelius Tamino and Schlüter, Alexander}, year={2025} }","short":"J.A.T. Donner, A. Schlüter, in: SDEWES Conference 2025, 2025."},"date_updated":"2026-01-06T07:53:40Z","author":[{"first_name":"Johannes Aurelius Tamino","last_name":"Donner","orcid":"0009-0007-4757-4393","id":"72054","full_name":"Donner, Johannes Aurelius Tamino"},{"first_name":"Alexander","orcid":"0000-0002-2569-1624","last_name":"Schlüter","id":"103302","full_name":"Schlüter, Alexander"}],"date_created":"2025-12-10T12:30:59Z","title":"Development of an AI-driven decentralized control for fifth generation district heating and cooling networks","conference":{"end_date":"2025-10-10","location":"Dubrovnik","name":"20th SDEWES Conference","start_date":"2025-10-05"},"type":"conference_abstract","publication":"SDEWES Conference 2025","status":"public","_id":"63019","user_id":"103302","department":[{"_id":"876"},{"_id":"321"},{"_id":"9"},{"_id":"393"}],"keyword":["5GDHC","district heating","DHC","waste heat","AI-Driven"],"language":[{"iso":"eng"}]},{"status":"public","editor":[{"first_name":"J.","full_name":"Malmqvist, J.","last_name":"Malmqvist"},{"last_name":"Candi","full_name":"Candi, M.","first_name":"M."},{"first_name":"R.","full_name":"Saemundsson, R.","last_name":"Saemundsson"},{"first_name":"F.","full_name":"Bystrom, F.","last_name":"Bystrom"},{"full_name":"Isaksson, O.","last_name":"Isaksson","first_name":"O."}],"abstract":[{"text":"Developing Intelligent Technical Systems (ITS) involves a complex process encompassing planning, analysis, design, production, and maintenance. Model-Based Systems Engineering (MBSE) is a key methodology for systematic systems engineering. Designing models for ITS requires harmonious interaction of various elements, posing a challenge in MBSE. Leveraging Generative Artificial Intelligence, we generated a dataset for modeling, using prompt engineering on large language models. The generated artifacts can aid engineers in MBSE design or serve as synthetic training data for AI assistants.","lang":"eng"}],"type":"conference","publication":"DS 130: Proceedings of NordDesign 2024","language":[{"iso":"eng"}],"keyword":["Data Driven Design","Design Automation","Systems Engineering (SE)","Artificial Intelligence (AI)"],"user_id":"86782","_id":"56166","citation":{"apa":"Kulkarni, P. J., Tissen, D., Bernijazov, R., &#38; Dumitrescu, R. (2024). Towards Automated Design: Automatically Generating Modeling Elements with Prompt Engineering and Generative Artificial Intelligence. In J. Malmqvist, M. Candi, R. Saemundsson, F. Bystrom, &#38; O. Isaksson (Eds.), <i>DS 130: Proceedings of NordDesign 2024</i> (pp. 617–625). <a href=\"https://doi.org/10.35199/NORDDESIGN2024.66\">https://doi.org/10.35199/NORDDESIGN2024.66</a>","mla":"Kulkarni, Pranav Jayant, et al. “Towards Automated Design: Automatically Generating Modeling Elements with Prompt Engineering and Generative Artificial Intelligence.” <i>DS 130: Proceedings of NordDesign 2024</i>, edited by J. Malmqvist et al., 2024, pp. 
617–25, doi:<a href=\"https://doi.org/10.35199/NORDDESIGN2024.66\">10.35199/NORDDESIGN2024.66</a>.","bibtex":"@inproceedings{Kulkarni_Tissen_Bernijazov_Dumitrescu_2024, title={Towards Automated Design: Automatically Generating Modeling Elements with Prompt Engineering and Generative Artificial Intelligence}, DOI={<a href=\"https://doi.org/10.35199/NORDDESIGN2024.66\">10.35199/NORDDESIGN2024.66</a>}, booktitle={DS 130: Proceedings of NordDesign 2024}, author={Kulkarni, Pranav Jayant and Tissen, Denis and Bernijazov, Ruslan and Dumitrescu, Roman}, editor={Malmqvist, J. and Candi, M. and Saemundsson, R. and Bystrom, F. and Isaksson, O.}, year={2024}, pages={617–625} }","short":"P.J. Kulkarni, D. Tissen, R. Bernijazov, R. Dumitrescu, in: J. Malmqvist, M. Candi, R. Saemundsson, F. Bystrom, O. Isaksson (Eds.), DS 130: Proceedings of NordDesign 2024, 2024, pp. 617–625.","ieee":"P. J. Kulkarni, D. Tissen, R. Bernijazov, and R. Dumitrescu, “Towards Automated Design: Automatically Generating Modeling Elements with Prompt Engineering and Generative Artificial Intelligence,” in <i>DS 130: Proceedings of NordDesign 2024</i>, Reykjavik, 2024, pp. 617–625, doi: <a href=\"https://doi.org/10.35199/NORDDESIGN2024.66\">10.35199/NORDDESIGN2024.66</a>.","chicago":"Kulkarni, Pranav Jayant, Denis Tissen, Ruslan Bernijazov, and Roman Dumitrescu. “Towards Automated Design: Automatically Generating Modeling Elements with Prompt Engineering and Generative Artificial Intelligence.” In <i>DS 130: Proceedings of NordDesign 2024</i>, edited by J. Malmqvist, M. Candi, R. Saemundsson, F. Bystrom, and O. Isaksson, 617–25, 2024. <a href=\"https://doi.org/10.35199/NORDDESIGN2024.66\">https://doi.org/10.35199/NORDDESIGN2024.66</a>.","ama":"Kulkarni PJ, Tissen D, Bernijazov R, Dumitrescu R. Towards Automated Design: Automatically Generating Modeling Elements with Prompt Engineering and Generative Artificial Intelligence. In: Malmqvist J, Candi M, Saemundsson R, Bystrom F, Isaksson O, eds. <i>DS 130: Proceedings of NordDesign 2024</i>. ; 2024:617-625. 
doi:<a href=\"https://doi.org/10.35199/NORDDESIGN2024.66\">10.35199/NORDDESIGN2024.66</a>"},"page":"617-625","year":"2024","related_material":{"link":[{"relation":"confirmation","url":"https://www.designsociety.org/publication/47658/Towards+Automated+Design%3A+Automatically+Generating+Modeling+Elements+with+Prompt+Engineering+and+Generative+Artificial+Intelligence"}]},"publication_status":"epub_ahead","publication_identifier":{"unknown":["978-1-912254-21-7"]},"conference":{"location":"Reykjavik","end_date":"2024-08-14","start_date":"2024-08-12","name":"NordDesign Conference 2024"},"doi":"10.35199/NORDDESIGN2024.66","title":"Towards Automated Design: Automatically Generating Modeling Elements with Prompt Engineering and Generative Artificial Intelligence","author":[{"first_name":"Pranav Jayant","last_name":"Kulkarni","id":"86782","full_name":"Kulkarni, Pranav Jayant"},{"first_name":"Denis","id":"44458","full_name":"Tissen, Denis","last_name":"Tissen"},{"first_name":"Ruslan","full_name":"Bernijazov, Ruslan","id":"36312","last_name":"Bernijazov"},{"full_name":"Dumitrescu, Roman","id":"16190","last_name":"Dumitrescu","first_name":"Roman"}],"date_created":"2024-09-17T09:56:43Z","date_updated":"2024-09-17T09:57:07Z"},{"user_id":"47857","_id":"56277","language":[{"iso":"eng"}],"keyword":["AI","argumentation mining","discourse history","(automated, learner-sensitive) feedback"],"publication":"Proceedings of the Tenth Conference of the International Society for the Study of Argumentation","type":"conference","status":"public","abstract":[{"lang":"eng","text":"What is learner-sensitive feedback to argumentative learner texts when it is to be issued computer-based? Learning stages are difficult to quantify. The paper provides insight into the history of research since the 1980s and a preview of what this automated feedback might look like. These questions are embedded in a research project at the Universities of Paderborn and Hannover, Germany, from which a software (project name ArgSchool) emerges that will provide such feedback."}],"author":[{"id":"93839","full_name":"Kilsbach, Sebastian","last_name":"Kilsbach","first_name":"Sebastian"},{"first_name":"Nadine","full_name":"Michel, Nadine","id":"47857","last_name":"Michel"}],"date_created":"2024-09-30T09:24:12Z","date_updated":"2024-09-30T09:25:14Z","conference":{"location":"Leiden","end_date":"2023-07-07","start_date":"2023-07-04","name":"Tenth Conference of the International Society for the Study of Argumentation"},"title":"Computer-Based Generation of Learner-Sensitive Feedback to Argumentative Learner Texts","citation":{"short":"S. Kilsbach, N. Michel, in: Proceedings of the Tenth Conference of the International Society for the Study of Argumentation, 2024.","bibtex":"@inproceedings{Kilsbach_Michel_2024, title={Computer-Based Generation of Learner-Sensitive Feedback to Argumentative Learner Texts}, booktitle={Proceedings of the Tenth Conference of the International Society for the Study of Argumentation}, author={Kilsbach, Sebastian and Michel, Nadine}, year={2024} }","mla":"Kilsbach, Sebastian, and Nadine Michel. “Computer-Based Generation of Learner-Sensitive Feedback to Argumentative Learner Texts.” <i>Proceedings of the Tenth Conference of the International Society for the Study of Argumentation</i>, 2024.","apa":"Kilsbach, S., &#38; Michel, N. (2024). Computer-Based Generation of Learner-Sensitive Feedback to Argumentative Learner Texts. <i>Proceedings of the Tenth Conference of the International Society for the Study of Argumentation</i>. 
Tenth Conference of the International Society for the Study of Argumentation, Leiden.","chicago":"Kilsbach, Sebastian, and Nadine Michel. “Computer-Based Generation of Learner-Sensitive Feedback to Argumentative Learner Texts.” In <i>Proceedings of the Tenth Conference of the International Society for the Study of Argumentation</i>, 2024.","ieee":"S. Kilsbach and N. Michel, “Computer-Based Generation of Learner-Sensitive Feedback to Argumentative Learner Texts,” presented at the Tenth Conference of the International Society for the Study of Argumentation, Leiden, 2024.","ama":"Kilsbach S, Michel N. Computer-Based Generation of Learner-Sensitive Feedback to Argumentative Learner Texts. In: <i>Proceedings of the Tenth Conference of the International Society for the Study of Argumentation</i>. ; 2024."},"year":"2024"},{"publisher":"ICRC Humanitarian Law & Policy Blog","date_updated":"2024-11-26T09:49:48Z","oa":"1","date_created":"2024-09-30T11:44:28Z","author":[{"first_name":"Ishmael","id":"105772","full_name":"Bhila, Ishmael","last_name":"Bhila"},{"first_name":"Ingvild","last_name":"Bode","full_name":"Bode, Ingvild"}],"title":"The problem of algorithmic bias in AI-based military decision support systems","main_file_link":[{"url":"https://blogs.icrc.org/law-and-policy/2024/09/03/the-problem-of-algorithmic-bias-in-ai-based-military-decision-support-systems/","open_access":"1"}],"publication_status":"published","has_accepted_license":"1","related_material":{"link":[{"url":"https://blogs.icrc.org/law-and-policy/2024/09/03/the-problem-of-algorithmic-bias-in-ai-based-military-decision-support-systems/","relation":"confirmation"}]},"year":"2024","citation":{"ama":"Bhila I, Bode I. <i>The Problem of Algorithmic Bias in AI-Based Military Decision Support Systems</i>. ICRC Humanitarian Law &#38; Policy Blog; 2024.","chicago":"Bhila, Ishmael, and Ingvild Bode. <i>The Problem of Algorithmic Bias in AI-Based Military Decision Support Systems</i>. ICRC Humanitarian Law &#38; Policy Blog, 2024.","ieee":"I. Bhila and I. Bode, <i>The problem of algorithmic bias in AI-based military decision support systems</i>. ICRC Humanitarian Law &#38; Policy Blog, 2024.","bibtex":"@book{Bhila_Bode_2024, title={The problem of algorithmic bias in AI-based military decision support systems}, publisher={ICRC Humanitarian Law &#38; Policy Blog}, author={Bhila, Ishmael and Bode, Ingvild}, year={2024} }","short":"I. Bhila, I. Bode, The Problem of Algorithmic Bias in AI-Based Military Decision Support Systems, ICRC Humanitarian Law &#38; Policy Blog, 2024.","mla":"Bhila, Ishmael, and Ingvild Bode. <i>The Problem of Algorithmic Bias in AI-Based Military Decision Support Systems</i>. ICRC Humanitarian Law &#38; Policy Blog, 2024.","apa":"Bhila, I., &#38; Bode, I. (2024). <i>The problem of algorithmic bias in AI-based military decision support systems</i>. ICRC Humanitarian Law &#38; Policy Blog."},"_id":"56282","user_id":"105772","keyword":["Algorithmic Bias","AI","Decision Support Systems","Autonomous Weapons Systems"],"language":[{"iso":"eng"}],"type":"misc","abstract":[{"text":"Algorithmic bias has long been recognized as a key problem affecting decision-making processes that integrate artificial intelligence (AI) technologies. 
The increased use of AI in making military decisions relevant to the use of force has sustained such questions about biases in these technologies and in how human users programme with and rely on data based on hierarchized socio-cultural norms, knowledges, and modes of attention.\r\n\r\nIn this post, Dr Ingvild Bode, Professor at the Center for War Studies, University of Southern Denmark, and Ishmael Bhila, PhD researcher at the “Meaningful Human Control: Between Regulation and Reflexion” project, Paderborn University, unpack the problem of algorithmic bias with reference to AI-based decision support systems (AI DSS). They examine three categories of algorithmic bias – preexisting bias, technical bias, and emergent bias – across four lifecycle stages of an AI DSS, concluding that stakeholders in the ongoing discussion about AI in the military domain should consider the impact of algorithmic bias on AI DSS more seriously.","lang":"eng"}],"status":"public"},{"issue":"3","citation":{"ama":"Esposito E. Does Explainability Require Transparency? <i>Sociologica</i>. 2023;16(3):17-27. doi:<a href=\"https://doi.org/10.6092/ISSN.1971-8853/15804\">10.6092/ISSN.1971-8853/15804</a>","chicago":"Esposito, Elena . “Does Explainability Require Transparency?” <i>Sociologica</i> 16, no. 3 (2023): 17–27. <a href=\"https://doi.org/10.6092/ISSN.1971-8853/15804\">https://doi.org/10.6092/ISSN.1971-8853/15804</a>.","ieee":"E. Esposito, “Does Explainability Require Transparency?,” <i>Sociologica</i>, vol. 16, no. 3, pp. 17–27, 2023, doi: <a href=\"https://doi.org/10.6092/ISSN.1971-8853/15804\">10.6092/ISSN.1971-8853/15804</a>.","bibtex":"@article{Esposito_2023, title={Does Explainability Require Transparency?}, volume={16}, DOI={<a href=\"https://doi.org/10.6092/ISSN.1971-8853/15804\">10.6092/ISSN.1971-8853/15804</a>}, number={3}, journal={Sociologica}, author={Esposito, Elena }, year={2023}, pages={17–27} }","mla":"Esposito, Elena. “Does Explainability Require Transparency?” <i>Sociologica</i>, vol. 16, no. 3, 2023, pp. 17–27, doi:<a href=\"https://doi.org/10.6092/ISSN.1971-8853/15804\">10.6092/ISSN.1971-8853/15804</a>.","short":"E. Esposito, Sociologica 16 (2023) 17–27.","apa":"Esposito, E. (2023). Does Explainability Require Transparency? <i>Sociologica</i>, <i>16</i>(3), 17–27. <a href=\"https://doi.org/10.6092/ISSN.1971-8853/15804\">https://doi.org/10.6092/ISSN.1971-8853/15804</a>"},"page":"17-27","intvolume":"        16","year":"2023","author":[{"last_name":"Esposito","full_name":"Esposito, Elena ","first_name":"Elena "}],"date_created":"2024-02-18T10:16:43Z","volume":16,"date_updated":"2024-02-26T08:46:26Z","doi":"10.6092/ISSN.1971-8853/15804","title":"Does Explainability Require Transparency?","type":"journal_article","publication":"Sociologica","status":"public","abstract":[{"text":"Dealing with opaque algorithms, the frequent overlap between transparency and explainability produces seemingly unsolvable dilemmas, as the much-discussed trade-off between model performance and model transparency. Referring to Niklas Luhmann's notion of communication, the paper argues that explainability does not necessarily require transparency and proposes an alternative approach. Explanations as communicative processes do not imply any disclosure of thoughts or neural processes, but only reformulations that provide the partners with additional elements and enable them to understand (from their perspective) what has been done and why. 
Recent computational approaches aiming at post-hoc explainability reproduce what happens in communication, producing explanations of the working of algorithms that can be different from the processes of the algorithms.","lang":"eng"}],"user_id":"54779","department":[{"_id":"660"}],"project":[{"_id":"121","name":"TRR 318 - B01: TRR 318 - Ein dialogbasierter Ansatz zur Erklärung von Modellen des maschinellen Lernens (Teilprojekt B01)","grant_number":"438445824"}],"_id":"51368","language":[{"iso":"eng"}],"keyword":["Explainable AI","Transparency","Explanation","Communication","Sociological systems theory"]},{"language":[{"iso":"eng"}],"keyword":["Explainable AI","Inexplicability","Transparency","Explanation","Opacity","Contestability"],"department":[{"_id":"660"}],"user_id":"54779","_id":"51369","project":[{"_id":"121","name":"TRR 318 - B01: TRR 318 - Ein dialogbasierter Ansatz zur Erklärung von Modellen des maschinellen Lernens (Teilprojekt B01)","grant_number":"438445824"}],"status":"public","abstract":[{"text":"This short introduction presents the symposium ‘Explaining Machines’. It locates the debate about Explainable AI in the history of the reflection about AI and outlines the issues discussed in the contributions.","lang":"eng"}],"publication":"Sociologica","type":"journal_article","doi":"10.6092/ISSN.1971-8853/16265","title":"Explaining Machines: Social Management of Incomprehensible Algorithms. Introduction","volume":16,"author":[{"last_name":"Esposito","full_name":"Esposito, Elena","first_name":"Elena"}],"date_created":"2024-02-18T10:23:23Z","date_updated":"2024-02-26T08:45:56Z","page":"1-4","intvolume":"        16","citation":{"apa":"Esposito, E. (2023). Explaining Machines: Social Management of Incomprehensible Algorithms. Introduction. <i>Sociologica</i>, <i>16</i>(3), 1–4. <a href=\"https://doi.org/10.6092/ISSN.1971-8853/16265\">https://doi.org/10.6092/ISSN.1971-8853/16265</a>","mla":"Esposito, Elena. “Explaining Machines: Social Management of Incomprehensible Algorithms. Introduction.” <i>Sociologica</i>, vol. 16, no. 3, 2023, pp. 1–4, doi:<a href=\"https://doi.org/10.6092/ISSN.1971-8853/16265\">10.6092/ISSN.1971-8853/16265</a>.","short":"E. Esposito, Sociologica 16 (2023) 1–4.","bibtex":"@article{Esposito_2023, title={Explaining Machines: Social Management of Incomprehensible Algorithms. Introduction}, volume={16}, DOI={<a href=\"https://doi.org/10.6092/ISSN.1971-8853/16265\">10.6092/ISSN.1971-8853/16265</a>}, number={3}, journal={Sociologica}, author={Esposito, Elena}, year={2023}, pages={1–4} }","ama":"Esposito E. Explaining Machines: Social Management of Incomprehensible Algorithms. Introduction. <i>Sociologica</i>. 2023;16(3):1-4. doi:<a href=\"https://doi.org/10.6092/ISSN.1971-8853/16265\">10.6092/ISSN.1971-8853/16265</a>","ieee":"E. Esposito, “Explaining Machines: Social Management of Incomprehensible Algorithms. Introduction,” <i>Sociologica</i>, vol. 16, no. 3, pp. 1–4, 2023, doi: <a href=\"https://doi.org/10.6092/ISSN.1971-8853/16265\">10.6092/ISSN.1971-8853/16265</a>.","chicago":"Esposito, Elena. “Explaining Machines: Social Management of Incomprehensible Algorithms. Introduction.” <i>Sociologica</i> 16, no. 3 (2023): 1–4. 
<a href=\"https://doi.org/10.6092/ISSN.1971-8853/16265\">https://doi.org/10.6092/ISSN.1971-8853/16265</a>."},"year":"2023","issue":"3"},{"language":[{"iso":"eng"}],"keyword":["fairness in AI","SLR","hiring","AI implementation","AI-based algorithms"],"user_id":"77643","_id":"33490","status":"public","abstract":[{"lang":"eng","text":"Algorithmic fairness in Information Systems (IS) is a concept that aims to mitigate systematic discrimination and bias in automated decision-making. However, previous research argued that different fairness criteria are often incompatible. In hiring, AI is used to assess and rank applicants according to their fit for vacant positions. However, various types of bias also exist for AI-based algorithms (e.g., using biased historical data). To reduce AI’s bias and thereby unfair treatment, we conducted a systematic literature review to identify suitable strategies for the context of hiring. We identified nine fundamental articles in this context and extracted four types of approaches to address unfairness in AI, namely pre-process, in-process, post-process, and feature selection. Based on our findings, we (a) derived a research agenda for future studies and (b) proposed strategies for practitioners who design and develop AIs for hiring purposes."}],"type":"conference","publication":"Proceedings of the Annual Hawaii International Conference on System Sciences (HICSS)","main_file_link":[{"url":"https://hdl.handle.net/10125/102654"}],"conference":{"name":"Proceedings of the Annual Hawaii International Conference on System Sciences (HICSS)","start_date":"2023-01-03","end_date":"2023-01-06"},"title":"Approaches to Improve Fairness when Deploying AI-based Algorithms in Hiring – Using a Systematic Literature Review to Guide Future Research","date_created":"2022-09-27T12:39:12Z","author":[{"last_name":"Rieskamp","id":"77643","full_name":"Rieskamp, Jonas","first_name":"Jonas"},{"full_name":"Hofeditz, Lennart","last_name":"Hofeditz","first_name":"Lennart"},{"full_name":"Mirbabaie, Milad","id":"88691","last_name":"Mirbabaie","first_name":"Milad"},{"last_name":"Stieglitz","full_name":"Stieglitz, Stefan","first_name":"Stefan"}],"date_updated":"2023-02-06T14:39:51Z","citation":{"ieee":"J. Rieskamp, L. Hofeditz, M. Mirbabaie, and S. Stieglitz, “Approaches to Improve Fairness when Deploying AI-based Algorithms in Hiring – Using a Systematic Literature Review to Guide Future Research,” presented at the Proceedings of the Annual Hawaii International Conference on System Sciences (HICSS), 2023.","chicago":"Rieskamp, Jonas, Lennart Hofeditz, Milad Mirbabaie, and Stefan Stieglitz. “Approaches to Improve Fairness When Deploying AI-Based Algorithms in Hiring – Using a Systematic Literature Review to Guide Future Research.” In <i>Proceedings of the Annual Hawaii International Conference on System Sciences (HICSS)</i>, 2023.","ama":"Rieskamp J, Hofeditz L, Mirbabaie M, Stieglitz S. Approaches to Improve Fairness when Deploying AI-based Algorithms in Hiring – Using a Systematic Literature Review to Guide Future Research. In: <i>Proceedings of the Annual Hawaii International Conference on System Sciences (HICSS)</i>. 
; 2023.","bibtex":"@inproceedings{Rieskamp_Hofeditz_Mirbabaie_Stieglitz_2023, title={Approaches to Improve Fairness when Deploying AI-based Algorithms in Hiring – Using a Systematic Literature Review to Guide Future Research}, booktitle={Proceedings of the Annual Hawaii International Conference on System Sciences (HICSS)}, author={Rieskamp, Jonas and Hofeditz, Lennart and Mirbabaie, Milad and Stieglitz, Stefan}, year={2023} }","mla":"Rieskamp, Jonas, et al. “Approaches to Improve Fairness When Deploying AI-Based Algorithms in Hiring – Using a Systematic Literature Review to Guide Future Research.” <i>Proceedings of the Annual Hawaii International Conference on System Sciences (HICSS)</i>, 2023.","short":"J. Rieskamp, L. Hofeditz, M. Mirbabaie, S. Stieglitz, in: Proceedings of the Annual Hawaii International Conference on System Sciences (HICSS), 2023.","apa":"Rieskamp, J., Hofeditz, L., Mirbabaie, M., &#38; Stieglitz, S. (2023). Approaches to Improve Fairness when Deploying AI-based Algorithms in Hiring – Using a Systematic Literature Review to Guide Future Research. <i>Proceedings of the Annual Hawaii International Conference on System Sciences (HICSS)</i>. Proceedings of the Annual Hawaii International Conference on System Sciences (HICSS)."},"year":"2023"},{"title":"HIEF: a holistic interpretability and explainability framework","doi":"10.1080/12460125.2023.2207268","main_file_link":[{"url":"https://www.tandfonline.com/doi/full/10.1080/12460125.2023.2207268"}],"publisher":"Taylor & Francis","date_updated":"2023-05-26T05:08:36Z","author":[{"last_name":"Kucklick","id":"77066","full_name":"Kucklick, Jan-Peter","first_name":"Jan-Peter"}],"date_created":"2023-05-26T05:04:45Z","year":"2023","page":"1-41","citation":{"ama":"Kucklick J-P. HIEF: a holistic interpretability and explainability framework. <i>Journal of Decision Systems</i>. Published online 2023:1-41. doi:<a href=\"https://doi.org/10.1080/12460125.2023.2207268\">10.1080/12460125.2023.2207268</a>","ieee":"J.-P. Kucklick, “HIEF: a holistic interpretability and explainability framework,” <i>Journal of Decision Systems</i>, pp. 1–41, 2023, doi: <a href=\"https://doi.org/10.1080/12460125.2023.2207268\">10.1080/12460125.2023.2207268</a>.","chicago":"Kucklick, Jan-Peter. “HIEF: A Holistic Interpretability and Explainability Framework.” <i>Journal of Decision Systems</i>, 2023, 1–41. <a href=\"https://doi.org/10.1080/12460125.2023.2207268\">https://doi.org/10.1080/12460125.2023.2207268</a>.","bibtex":"@article{Kucklick_2023, title={HIEF: a holistic interpretability and explainability framework}, DOI={<a href=\"https://doi.org/10.1080/12460125.2023.2207268\">10.1080/12460125.2023.2207268</a>}, journal={Journal of Decision Systems}, publisher={Taylor &#38; Francis}, author={Kucklick, Jan-Peter}, year={2023}, pages={1–41} }","short":"J.-P. Kucklick, Journal of Decision Systems (2023) 1–41.","mla":"Kucklick, Jan-Peter. “HIEF: A Holistic Interpretability and Explainability Framework.” <i>Journal of Decision Systems</i>, Taylor &#38; Francis, 2023, pp. 1–41, doi:<a href=\"https://doi.org/10.1080/12460125.2023.2207268\">10.1080/12460125.2023.2207268</a>.","apa":"Kucklick, J.-P. (2023). HIEF: a holistic interpretability and explainability framework. <i>Journal of Decision Systems</i>, 1–41. 
<a href=\"https://doi.org/10.1080/12460125.2023.2207268\">https://doi.org/10.1080/12460125.2023.2207268</a>"},"publication_identifier":{"issn":["1246-0125","2116-7052"]},"publication_status":"published","keyword":["Explainable AI (XAI)","machine learning","interpretability","real estate appraisal","framework","taxonomy"],"language":[{"iso":"eng"}],"_id":"45299","department":[{"_id":"195"},{"_id":"196"}],"user_id":"77066","abstract":[{"lang":"eng","text":"Many applications are driven by Machine Learning (ML) today. While complex ML models lead to an accurate prediction, their inner decision-making is obfuscated. However, especially for high-stakes decisions, interpretability and explainability of the model are necessary. Therefore, we develop a holistic interpretability and explainability framework (HIEF) to objectively describe and evaluate an intelligent system’s explainable AI (XAI) capacities. This guides data scientists to create more transparent models. To evaluate our framework, we analyse 50 real estate appraisal papers to ensure the robustness of HIEF. Additionally, we identify six typical types of intelligent systems, so-called archetypes, which range from explanatory to predictive, and demonstrate how researchers can use the framework to identify blind-spot topics in their domain. Finally, regarding comprehensiveness, we used a random sample of six intelligent systems and conducted an applicability check to provide external validity."}],"status":"public","publication":"Journal of Decision Systems","type":"journal_article"},{"type":"conference","status":"public","abstract":[{"lang":"eng","text":"We describe a prototype of a Clinical Decision Support System (CDSS) that provides (counterfactual) explanations to support accurate medical diagnosis. The prototype is based on an inherently interpretable Bayesian network (BN). Our research aims to investigate which explanations are most useful for medical experts and whether co-constructing explanations can foster trust and acceptance of CDSS."}],"department":[{"_id":"660"}],"user_id":"93275","_id":"56477","project":[{"_id":"128","name":"TRR 318 - C5: TRR 318 - Subproject C5"}],"language":[{"iso":"eng"}],"keyword":["Explainable AI","Clinical decision support","Bayesian network","Counterfactual explanations"],"citation":{"mla":"Liedeker, Felix, and Philipp Cimiano. <i>A Prototype of an Interactive Clinical Decision Support System with Counterfactual Explanations</i>. 2023.","short":"F. Liedeker, P. Cimiano, in: 2023.","bibtex":"@inproceedings{Liedeker_Cimiano_2023, title={A Prototype of an Interactive Clinical Decision Support System with Counterfactual Explanations}, author={Liedeker, Felix and Cimiano, Philipp}, year={2023} }","apa":"Liedeker, F., &#38; Cimiano, P. (2023). <i>A Prototype of an Interactive Clinical Decision Support System with Counterfactual Explanations</i>. xAI-2023 Late-breaking Work, Demos and Doctoral Consortium co-located with the 1st World Conference on eXplainable Artificial Intelligence (xAI-2023), Lissabon.","ama":"Liedeker F, Cimiano P. A Prototype of an Interactive Clinical Decision Support System with Counterfactual Explanations. In: ; 2023.","chicago":"Liedeker, Felix, and Philipp Cimiano. “A Prototype of an Interactive Clinical Decision Support System with Counterfactual Explanations,” 2023.","ieee":"F. Liedeker and P. 
Cimiano, “A Prototype of an Interactive Clinical Decision Support System with Counterfactual Explanations,” presented at the xAI-2023 Late-breaking Work, Demos and Doctoral Consortium co-located with the 1st World Conference on eXplainable Artificial Intelligence (xAI-2023), Lissabon, 2023."},"year":"2023","author":[{"last_name":"Liedeker","id":"93275","full_name":"Liedeker, Felix","first_name":"Felix"},{"full_name":"Cimiano, Philipp","last_name":"Cimiano","first_name":"Philipp"}],"date_created":"2024-10-09T14:50:09Z","date_updated":"2024-10-09T15:04:53Z","conference":{"location":"Lissabon","end_date":"2023-07-28","start_date":"2023-07-26","name":"xAI-2023 Late-breaking Work, Demos and Doctoral Consortium co-located with the 1st World Conference on eXplainable Artificial Intelligence (xAI-2023)"},"title":"A Prototype of an Interactive Clinical Decision Support System with Counterfactual Explanations"},{"title":"Use What You Know: Network and Service Coordination Beyond Certainty","publisher":"IEEE","date_created":"2022-01-11T08:43:26Z","year":"2022","quality_controlled":"1","ddc":["004"],"keyword":["network management","service management","AI","Monte Carlo Tree Search","model-based","QoS"],"language":[{"iso":"eng"}],"abstract":[{"text":"Modern services often comprise several components, such as chained virtual network functions, microservices, or\r\nmachine learning functions. Providing such services requires to decide how often to instantiate each component, where to place these instances in the network, how to chain them and route traffic through them. \r\nTo overcome limitations of conventional, hardwired heuristics, deep reinforcement learning (DRL) approaches for self-learning network and service management have emerged recently. These model-free DRL approaches are more flexible but typically learn tabula rasa, i.e., disregard existing understanding of networks, services, and their coordination. \r\n\r\nInstead, we propose FutureCoord, a novel model-based AI approach that leverages existing understanding of networks and services for more efficient and effective coordination without time-intensive training. FutureCoord combines Monte Carlo Tree Search with a stochastic traffic model. This allows FutureCoord to estimate the impact of future incoming traffic and effectively optimize long-term effects, taking fluctuating demand and Quality of Service (QoS) requirements into account. 
Our extensive evaluation based on real-world network topologies, services, and traffic traces indicates that FutureCoord clearly outperforms state-of-the-art model-free and model-based approaches with up to 51% higher flow success ratios.","lang":"eng"}],"file":[{"date_updated":"2022-01-11T08:39:57Z","creator":"stschn","date_created":"2022-01-11T08:39:57Z","file_size":528653,"access_level":"open_access","file_id":"29222","file_name":"author_version.pdf","content_type":"application/pdf","relation":"main_file"}],"publication":"IEEE/IFIP Network Operations and Management Symposium (NOMS)","conference":{"start_date":"2022-04-25","name":"IEEE/IFIP Network Operations and Management Symposium (NOMS)","location":"Budapest","end_date":"2022-04-29"},"date_updated":"2022-01-11T08:44:04Z","oa":"1","author":[{"full_name":"Werner, Stefan","last_name":"Werner","first_name":"Stefan"},{"id":"35343","full_name":"Schneider, Stefan Balthasar","last_name":"Schneider","orcid":"0000-0001-8210-4011","first_name":"Stefan Balthasar"},{"full_name":"Karl, Holger","id":"126","last_name":"Karl","first_name":"Holger"}],"citation":{"apa":"Werner, S., Schneider, S. B., &#38; Karl, H. (2022). Use What You Know: Network and Service Coordination Beyond Certainty. <i>IEEE/IFIP Network Operations and Management Symposium (NOMS)</i>. IEEE/IFIP Network Operations and Management Symposium (NOMS), Budapest.","mla":"Werner, Stefan, et al. “Use What You Know: Network and Service Coordination Beyond Certainty.” <i>IEEE/IFIP Network Operations and Management Symposium (NOMS)</i>, IEEE, 2022.","bibtex":"@inproceedings{Werner_Schneider_Karl_2022, title={Use What You Know: Network and Service Coordination Beyond Certainty}, booktitle={IEEE/IFIP Network Operations and Management Symposium (NOMS)}, publisher={IEEE}, author={Werner, Stefan and Schneider, Stefan Balthasar and Karl, Holger}, year={2022} }","short":"S. Werner, S.B. Schneider, H. Karl, in: IEEE/IFIP Network Operations and Management Symposium (NOMS), IEEE, 2022.","chicago":"Werner, Stefan, Stefan Balthasar Schneider, and Holger Karl. “Use What You Know: Network and Service Coordination Beyond Certainty.” In <i>IEEE/IFIP Network Operations and Management Symposium (NOMS)</i>. IEEE, 2022.","ieee":"S. Werner, S. B. Schneider, and H. Karl, “Use What You Know: Network and Service Coordination Beyond Certainty,” presented at the IEEE/IFIP Network Operations and Management Symposium (NOMS), Budapest, 2022.","ama":"Werner S, Schneider SB, Karl H. Use What You Know: Network and Service Coordination Beyond Certainty. In: <i>IEEE/IFIP Network Operations and Management Symposium (NOMS)</i>. IEEE; 2022."},"has_accepted_license":"1","file_date_updated":"2022-01-11T08:39:57Z","project":[{"name":"SFB 901: SFB 901","_id":"1"},{"name":"SFB 901 - C: SFB 901 - Project Area C","_id":"4"},{"name":"SFB 901 - C4: SFB 901 - Subproject C4","_id":"16"}],"_id":"29220","user_id":"35343","department":[{"_id":"75"}],"status":"public","type":"conference"},{"abstract":[{"lang":"eng","text":"Artificial intelligence (AI) has moved beyond the planning phase in many organisations and it is often accompanied by uncertainties and fears of job loss among employees. It is crucial to manage employees’ attitudes towards the deployment of an AI-based technology effectively and counteract possible resistance behaviour. We present lessons learned from an industry case where we conducted interviews with affected employees. 
We evaluated our results with managers across industries and found that the deployment of AI-based technologies does not differ from other IT, but that the change is perceived differently due to misguided expectations."}],"status":"public","type":"journal_article","publication":"International Journal of Management Practice","keyword":["Artificial Intelligence","Change Management","Resistance","AI-Driven Change","AI Deployment","AI Perception"],"language":[{"iso":"eng"}],"_id":"37155","user_id":"80546","year":"2021","citation":{"ama":"Stieglitz S, Möllmann (Frick) NRJ, Mirbabaie M, Hofeditz L, Ross B. Recommendations for Managing AI-Driven Change Processes: When Expectations Meet Reality. <i>International Journal of Management Practice</i>. Published online 2021.","ieee":"S. Stieglitz, N. R. J. Möllmann (Frick), M. Mirbabaie, L. Hofeditz, and B. Ross, “Recommendations for Managing AI-Driven Change Processes: When Expectations Meet Reality,” <i>International Journal of Management Practice</i>, 2021.","chicago":"Stieglitz, Stefan, Nicholas R. J. Möllmann (Frick), Milad Mirbabaie, Lennart Hofeditz, and Björn Ross. “Recommendations for Managing AI-Driven Change Processes: When Expectations Meet Reality.” <i>International Journal of Management Practice</i>, 2021.","apa":"Stieglitz, S., Möllmann (Frick), N. R. J., Mirbabaie, M., Hofeditz, L., &#38; Ross, B. (2021). Recommendations for Managing AI-Driven Change Processes: When Expectations Meet Reality. <i>International Journal of Management Practice</i>.","mla":"Stieglitz, Stefan, et al. “Recommendations for Managing AI-Driven Change Processes: When Expectations Meet Reality.” <i>International Journal of Management Practice</i>, Inderscience, 2021.","short":"S. Stieglitz, N.R.J. Möllmann (Frick), M. Mirbabaie, L. Hofeditz, B. Ross, International Journal of Management Practice (2021).","bibtex":"@article{Stieglitz_Möllmann (Frick)_Mirbabaie_Hofeditz_Ross_2021, title={Recommendations for Managing AI-Driven Change Processes: When Expectations Meet Reality}, journal={International Journal of Management Practice}, publisher={Inderscience}, author={Stieglitz, Stefan and Möllmann (Frick), Nicholas R. J. and Mirbabaie, Milad and Hofeditz, Lennart and Ross, Björn}, year={2021} }"},"publication_identifier":{"issn":["1477-9064"]},"title":"Recommendations for Managing AI-Driven Change Processes: When Expectations Meet Reality","date_updated":"2023-01-18T07:59:08Z","publisher":"Inderscience","date_created":"2023-01-17T15:37:55Z","author":[{"first_name":"Stefan","last_name":"Stieglitz","full_name":"Stieglitz, Stefan"},{"last_name":"Möllmann (Frick)","full_name":"Möllmann (Frick), Nicholas R. J.","first_name":"Nicholas R. J."},{"id":"88691","full_name":"Mirbabaie, Milad","last_name":"Mirbabaie","first_name":"Milad"},{"last_name":"Hofeditz","full_name":"Hofeditz, Lennart","first_name":"Lennart"},{"last_name":"Ross","full_name":"Ross, Björn","first_name":"Björn"}]}]
