[{"user_id":"92810","department":[{"_id":"424"},{"_id":"660"}],"project":[{"name":"TRR 318 ; TP C01: Gesundes Misstrauen in Erklärungen","_id":"124"}],"_id":"63611","language":[{"iso":"eng"}],"article_type":"original","article_number":"1694367","keyword":["appropriate trust","healthy distrust","visual attention","Theory of Visual Attention","human-AI interaction","Bayesian cognitive model","image classification"],"type":"journal_article","publication":"Frontiers in Psychology","status":"public","abstract":[{"text":"When humans interact with artificial intelligence (AI), one desideratum is appropriate trust. Typically, appropriate trust encompasses that humans trust AI except for instances in which they either explicitly notice AI errors or are suspicious that errors could be present. So far, appropriate trust or related notions have mainly been investigated by assessing trust and reliance. In this contribution, we argue that these assessments are insufficient to measure the complex aim of appropriate trust and the related notion of healthy distrust. We introduce and test the perspective of covert visual attention as an additional indicator for appropriate trust and draw conceptual connections to the notion of healthy distrust. To test the validity of our conceptualization, we formalize visual attention using the Theory of Visual Attention and measure its properties that are potentially relevant to appropriate trust and healthy distrust in an image classification task. Based on temporal-order judgment performance, we estimate participants' attentional capacity and attentional weight toward correct and incorrect mock-up AI classifications. We observe that misclassifications reduce attentional capacity compared to correct classifications. However, our results do not indicate that this reduction is beneficial for a subsequent judgment of the classifications. 
The attentional weighting is not affected by the classifications' correctness but by the difficulty of categorizing the stimuli themselves. We discuss these results, their implications, and the limited potential for using visual attention as an indicator of appropriate trust and healthy distrust.","lang":"eng"}],"date_created":"2026-01-14T14:21:59Z","author":[{"first_name":"Tobias Martin","full_name":"Peters, Tobias Martin","id":"92810","last_name":"Peters","orcid":"0009-0008-5193-6243"},{"full_name":"Biermeier, Kai","id":"55908","orcid":"0000-0002-2879-2359","last_name":"Biermeier","first_name":"Kai"},{"first_name":"Ingrid","last_name":"Scharlau","orcid":"0000-0003-2364-9489","full_name":"Scharlau, Ingrid","id":"451"}],"volume":16,"publisher":"Frontiers Media SA","date_updated":"2026-01-14T14:29:03Z","doi":"10.3389/fpsyg.2025.1694367","title":"Assessing healthy distrust in human-AI interaction: interpreting changes in visual attention","publication_status":"published","publication_identifier":{"issn":["1664-1078"]},"citation":{"ieee":"T. M. Peters, K. Biermeier, and I. Scharlau, “Assessing healthy distrust in human-AI interaction: interpreting changes in visual attention,” <i>Frontiers in Psychology</i>, vol. 16, Art. no. 1694367, 2026, doi: <a href=\"https://doi.org/10.3389/fpsyg.2025.1694367\">10.3389/fpsyg.2025.1694367</a>.","chicago":"Peters, Tobias Martin, Kai Biermeier, and Ingrid Scharlau. “Assessing Healthy Distrust in Human-AI Interaction: Interpreting Changes in Visual Attention.” <i>Frontiers in Psychology</i> 16 (2026). <a href=\"https://doi.org/10.3389/fpsyg.2025.1694367\">https://doi.org/10.3389/fpsyg.2025.1694367</a>.","ama":"Peters TM, Biermeier K, Scharlau I. Assessing healthy distrust in human-AI interaction: interpreting changes in visual attention. <i>Frontiers in Psychology</i>. 2026;16. doi:<a href=\"https://doi.org/10.3389/fpsyg.2025.1694367\">10.3389/fpsyg.2025.1694367</a>","apa":"Peters, T. M., Biermeier, K., &#38; Scharlau, I. (2026). 
Assessing healthy distrust in human-AI interaction: interpreting changes in visual attention. <i>Frontiers in Psychology</i>, <i>16</i>, Article 1694367. <a href=\"https://doi.org/10.3389/fpsyg.2025.1694367\">https://doi.org/10.3389/fpsyg.2025.1694367</a>","mla":"Peters, Tobias Martin, et al. “Assessing Healthy Distrust in Human-AI Interaction: Interpreting Changes in Visual Attention.” <i>Frontiers in Psychology</i>, vol. 16, 1694367, Frontiers Media SA, 2026, doi:<a href=\"https://doi.org/10.3389/fpsyg.2025.1694367\">10.3389/fpsyg.2025.1694367</a>.","bibtex":"@article{Peters_Biermeier_Scharlau_2026, title={Assessing healthy distrust in human-AI interaction: interpreting changes in visual attention}, volume={16}, DOI={<a href=\"https://doi.org/10.3389/fpsyg.2025.1694367\">10.3389/fpsyg.2025.1694367</a>}, number={1694367}, journal={Frontiers in Psychology}, publisher={Frontiers Media SA}, author={Peters, Tobias Martin and Biermeier, Kai and Scharlau, Ingrid}, year={2026} }","short":"T.M. Peters, K. Biermeier, I. Scharlau, Frontiers in Psychology 16 (2026)."},"intvolume":"        16","year":"2026"},{"keyword":["XAI","Appropriate trust","Distrust","Reliance","Human-centric evaluation","Trustworthy AI"],"article_number":"101357","language":[{"iso":"eng"}],"_id":"59756","project":[{"name":"TRR 318 - C1: TRR 318 - Subproject C1 - Gesundes Misstrauen in Erklärungen","_id":"124"}],"department":[{"_id":"424"},{"_id":"660"}],"user_id":"92810","abstract":[{"text":"A current concern in the field of Artificial Intelligence (AI) is to ensure the trustworthiness of AI systems. The development of explainability methods is one prominent way to address this, which has often resulted in the assumption that the use of explainability will lead to an increase in the trust of users and wider society. 
However, the dynamics between explainability and trust are not well established and empirical investigations of their relation remain mixed or inconclusive.\r\nIn this paper we provide a detailed description of the concepts of user trust and distrust in AI and their relation to appropriate reliance. For that we draw from the fields of machine learning, human–computer interaction, and the social sciences. Based on these insights, we have created a focused study of empirical literature of existing empirical studies that investigate the effects of AI systems and XAI methods on user (dis)trust, in order to substantiate our conceptualization of trust, distrust, and reliance. With respect to our conceptual understanding we identify gaps in existing empirical work. With clarifying the concepts and summarizing the empirical studies, we aim to provide researchers, who examine user trust in AI, with an improved starting point for developing user studies to measure and evaluate the user’s attitude towards and reliance on AI systems.","lang":"eng"}],"status":"public","publication":"Cognitive Systems Research","type":"journal_article","title":"Trust, distrust, and appropriate reliance in (X)AI: A conceptual clarification of user trust and survey of its empirical evaluation","doi":"10.1016/j.cogsys.2025.101357","publisher":"Elsevier BV","date_updated":"2025-05-15T11:16:27Z","author":[{"first_name":"Roel","full_name":"Visser, Roel","last_name":"Visser"},{"last_name":"Peters","orcid":"0009-0008-5193-6243","id":"92810","full_name":"Peters, Tobias Martin","first_name":"Tobias Martin"},{"last_name":"Scharlau","orcid":"0000-0003-2364-9489","full_name":"Scharlau, Ingrid","id":"451","first_name":"Ingrid"},{"first_name":"Barbara","full_name":"Hammer, Barbara","last_name":"Hammer"}],"date_created":"2025-05-02T09:26:15Z","year":"2025","citation":{"ama":"Visser R, Peters TM, Scharlau I, Hammer B. 
Trust, distrust, and appropriate reliance in (X)AI: A conceptual clarification of user trust and survey of its empirical evaluation. <i>Cognitive Systems Research</i>. Published online 2025. doi:<a href=\"https://doi.org/10.1016/j.cogsys.2025.101357\">10.1016/j.cogsys.2025.101357</a>","chicago":"Visser, Roel, Tobias Martin Peters, Ingrid Scharlau, and Barbara Hammer. “Trust, Distrust, and Appropriate Reliance in (X)AI: A Conceptual Clarification of User Trust and Survey of Its Empirical Evaluation.” <i>Cognitive Systems Research</i>, 2025. <a href=\"https://doi.org/10.1016/j.cogsys.2025.101357\">https://doi.org/10.1016/j.cogsys.2025.101357</a>.","ieee":"R. Visser, T. M. Peters, I. Scharlau, and B. Hammer, “Trust, distrust, and appropriate reliance in (X)AI: A conceptual clarification of user trust and survey of its empirical evaluation,” <i>Cognitive Systems Research</i>, Art. no. 101357, 2025, doi: <a href=\"https://doi.org/10.1016/j.cogsys.2025.101357\">10.1016/j.cogsys.2025.101357</a>.","apa":"Visser, R., Peters, T. M., Scharlau, I., &#38; Hammer, B. (2025). Trust, distrust, and appropriate reliance in (X)AI: A conceptual clarification of user trust and survey of its empirical evaluation. <i>Cognitive Systems Research</i>, Article 101357. <a href=\"https://doi.org/10.1016/j.cogsys.2025.101357\">https://doi.org/10.1016/j.cogsys.2025.101357</a>","mla":"Visser, Roel, et al. “Trust, Distrust, and Appropriate Reliance in (X)AI: A Conceptual Clarification of User Trust and Survey of Its Empirical Evaluation.” <i>Cognitive Systems Research</i>, 101357, Elsevier BV, 2025, doi:<a href=\"https://doi.org/10.1016/j.cogsys.2025.101357\">10.1016/j.cogsys.2025.101357</a>.","short":"R. Visser, T.M. Peters, I. Scharlau, B. 
Hammer, Cognitive Systems Research (2025).","bibtex":"@article{Visser_Peters_Scharlau_Hammer_2025, title={Trust, distrust, and appropriate reliance in (X)AI: A conceptual clarification of user trust and survey of its empirical evaluation}, DOI={<a href=\"https://doi.org/10.1016/j.cogsys.2025.101357\">10.1016/j.cogsys.2025.101357</a>}, number={101357}, journal={Cognitive Systems Research}, publisher={Elsevier BV}, author={Visser, Roel and Peters, Tobias Martin and Scharlau, Ingrid and Hammer, Barbara}, year={2025} }"},"publication_identifier":{"issn":["1389-0417"]},"publication_status":"published"},{"project":[{"name":"TRR 318 - C1: TRR 318 - Subproject C1 - Gesundes Misstrauen in Erklärungen","_id":"124"}],"_id":"59755","user_id":"92810","department":[{"_id":"424"},{"_id":"660"}],"article_type":"original","keyword":["trust in AI","trust","distrust","human-AI interaction","Signal Detection Theory","Bayesian parameter estimation","image classification"],"language":[{"iso":"eng"}],"type":"journal_article","publication":"Frontiers in Psychology","abstract":[{"text":"Due to the application of Artificial Intelligence (AI) in high-risk domains like law or medicine,\r\ntrustworthy AI and trust in AI are of increasing scientific and public relevance. A typical conception,\r\nfor example in the context of medical diagnosis, is that a knowledgeable user receives AIgenerated\r\nclassification as advice. Research to improve such interactions often aims to foster the\r\nuser’s trust, which in turn should improve the combined human-AI performance. Given that AI\r\nmodels can err, we argue that the possibility to critically review, thus to distrust, an AI decision is\r\nan equally interesting target of research.\r\nWe created two image classification scenarios in which the participants received mock-up\r\nAI advice. The quality of the advice decreases for a phase of the experiment. 
We studied the\r\ntask performance, trust and distrust of the participants, and tested whether an instruction to\r\nremain skeptical and review each piece of advice led to a better performance compared to a\r\nneutral condition. Our results indicate that this instruction does not improve but rather worsens\r\nthe participants’ performance. Repeated single-item self-report of trust and distrust shows an\r\nincrease in trust and a decrease in distrust after the drop in the AI’s classification quality, with no\r\ndifference between the two instructions. Furthermore, via a Bayesian Signal Detection Theory\r\nanalysis, we provide a procedure to assess appropriate reliance in detail, by quantifying whether\r\nthe problems of under- and over-reliance have been mitigated. We discuss implications of our\r\nresults for the usage of disclaimers before interacting with AI, as prominently used in current\r\nLLM-based chatbots, and for trust and distrust research.","lang":"eng"}],"status":"public","date_updated":"2025-05-27T09:10:09Z","date_created":"2025-05-02T09:22:39Z","author":[{"orcid":"0009-0008-5193-6243","last_name":"Peters","id":"92810","full_name":"Peters, Tobias Martin","first_name":"Tobias Martin"},{"first_name":"Ingrid","full_name":"Scharlau, Ingrid","id":"451","last_name":"Scharlau","orcid":"0000-0003-2364-9489"}],"volume":16,"title":"Interacting with fallible AI: Is distrust helpful when receiving AI misclassifications?","doi":"10.3389/fpsyg.2025.1574809","publication_status":"published","year":"2025","citation":{"ama":"Peters TM, Scharlau I. Interacting with fallible AI: Is distrust helpful when receiving AI misclassifications? <i>Frontiers in Psychology</i>. 2025;16. doi:<a href=\"https://doi.org/10.3389/fpsyg.2025.1574809\">10.3389/fpsyg.2025.1574809</a>","chicago":"Peters, Tobias Martin, and Ingrid Scharlau. “Interacting with Fallible AI: Is Distrust Helpful When Receiving AI Misclassifications?” <i>Frontiers in Psychology</i> 16 (2025). 
<a href=\"https://doi.org/10.3389/fpsyg.2025.1574809\">https://doi.org/10.3389/fpsyg.2025.1574809</a>.","ieee":"T. M. Peters and I. Scharlau, “Interacting with fallible AI: Is distrust helpful when receiving AI misclassifications?,” <i>Frontiers in Psychology</i>, vol. 16, 2025, doi: <a href=\"https://doi.org/10.3389/fpsyg.2025.1574809\">10.3389/fpsyg.2025.1574809</a>.","mla":"Peters, Tobias Martin, and Ingrid Scharlau. “Interacting with Fallible AI: Is Distrust Helpful When Receiving AI Misclassifications?” <i>Frontiers in Psychology</i>, vol. 16, 2025, doi:<a href=\"https://doi.org/10.3389/fpsyg.2025.1574809\">10.3389/fpsyg.2025.1574809</a>.","short":"T.M. Peters, I. Scharlau, Frontiers in Psychology 16 (2025).","bibtex":"@article{Peters_Scharlau_2025, title={Interacting with fallible AI: Is distrust helpful when receiving AI misclassifications?}, volume={16}, DOI={<a href=\"https://doi.org/10.3389/fpsyg.2025.1574809\">10.3389/fpsyg.2025.1574809</a>}, journal={Frontiers in Psychology}, author={Peters, Tobias Martin and Scharlau, Ingrid}, year={2025} }","apa":"Peters, T. M., &#38; Scharlau, I. (2025). Interacting with fallible AI: Is distrust helpful when receiving AI misclassifications? <i>Frontiers in Psychology</i>, <i>16</i>. <a href=\"https://doi.org/10.3389/fpsyg.2025.1574809\">https://doi.org/10.3389/fpsyg.2025.1574809</a>"},"intvolume":"        16"},{"publication":"Industry 4.0 Science","abstract":[{"lang":"eng","text":"Technical systems are characterized by increasing interdisciplinarity, complexity and networking. A product and its corresponding production systems require interdisciplinary multi-objective optimization. Sustainability and recyclability demands increase said complexity. The efficiency of previously established engineering methods is reaching its limits, which can only be overcome by systematic integration of extreme data. 
The aim of \"hybrid decision support\" is as follows: Data science and artificial intelligence should be used to supplement human capabilities in conjunction with existing heuristics, methods, modeling and simulation to increase the efficiency of product creation."}],"language":[{"iso":"eng"}],"keyword":["AI","artificial intelligence","Data Science","decision support","extreme data","Künstliche Intelligenz","product creation","product development"],"issue":"1","quality_controlled":"1","year":"2025","date_created":"2025-02-15T09:31:30Z","publisher":"GITO mbH Verlag","title":"Hybrid Decision Support in Product Creation - Improving performance with data science and artificial intelligence","type":"journal_article","status":"public","user_id":"405","department":[{"_id":"152"}],"_id":"58650","alternative_title":["Hybride Entscheidungsunterstützung in der Produktentstehung - Mit Data Science und Künstlicher Intelligenz die Leistungsfähigkeit erhöhen"],"article_type":"original","publication_status":"published","publication_identifier":{"issn":["2942-6170"]},"citation":{"bibtex":"@article{Gräßler_Pottebaum_Nyhuis_Stark_Thoben_Wiederkehr_2025, title={Hybrid Decision Support in Product Creation - Improving performance with data science and artificial intelligence}, volume={2025}, DOI={<a href=\"https://doi.org/10.30844/i4sd.25.1.18\">10.30844/i4sd.25.1.18</a>}, number={1}, journal={Industry 4.0 Science}, publisher={GITO mbH Verlag}, author={Gräßler, Iris and Pottebaum, Jens and Nyhuis, Peter and Stark, Rainer and Thoben, Klaus-Dieter and Wiederkehr, Petra}, year={2025} }","short":"I. Gräßler, J. Pottebaum, P. Nyhuis, R. Stark, K.-D. Thoben, P. Wiederkehr, Industry 4.0 Science 2025 (2025).","mla":"Gräßler, Iris, et al. “Hybrid Decision Support in Product Creation - Improving Performance with Data Science and Artificial Intelligence.” <i>Industry 4.0 Science</i>, vol. 2025, no. 
1, GITO mbH Verlag, 2025, doi:<a href=\"https://doi.org/10.30844/i4sd.25.1.18\">10.30844/i4sd.25.1.18</a>.","apa":"Gräßler, I., Pottebaum, J., Nyhuis, P., Stark, R., Thoben, K.-D., &#38; Wiederkehr, P. (2025). Hybrid Decision Support in Product Creation - Improving performance with data science and artificial intelligence. <i>Industry 4.0 Science</i>, <i>2025</i>(1). <a href=\"https://doi.org/10.30844/i4sd.25.1.18\">https://doi.org/10.30844/i4sd.25.1.18</a>","ama":"Gräßler I, Pottebaum J, Nyhuis P, Stark R, Thoben K-D, Wiederkehr P. Hybrid Decision Support in Product Creation - Improving performance with data science and artificial intelligence. <i>Industry 40 Science</i>. 2025;2025(1). doi:<a href=\"https://doi.org/10.30844/i4sd.25.1.18\">10.30844/i4sd.25.1.18</a>","ieee":"I. Gräßler, J. Pottebaum, P. Nyhuis, R. Stark, K.-D. Thoben, and P. Wiederkehr, “Hybrid Decision Support in Product Creation - Improving performance with data science and artificial intelligence,” <i>Industry 4.0 Science</i>, vol. 2025, no. 1, 2025, doi: <a href=\"https://doi.org/10.30844/i4sd.25.1.18\">10.30844/i4sd.25.1.18</a>.","chicago":"Gräßler, Iris, Jens Pottebaum, Peter Nyhuis, Rainer Stark, Klaus-Dieter Thoben, and Petra Wiederkehr. “Hybrid Decision Support in Product Creation - Improving Performance with Data Science and Artificial Intelligence.” <i>Industry 4.0 Science</i> 2025, no. 1 (2025). 
<a href=\"https://doi.org/10.30844/i4sd.25.1.18\">https://doi.org/10.30844/i4sd.25.1.18</a>."},"intvolume":"      2025","author":[{"first_name":"Iris","orcid":"0000-0001-5765-971X","last_name":"Gräßler","id":"47565","full_name":"Gräßler, Iris"},{"full_name":"Pottebaum, Jens","id":"405","orcid":"http://orcid.org/0000-0001-8778-2989","last_name":"Pottebaum","first_name":"Jens"},{"full_name":"Nyhuis, Peter","last_name":"Nyhuis","first_name":"Peter"},{"first_name":"Rainer","last_name":"Stark","full_name":"Stark, Rainer"},{"full_name":"Thoben, Klaus-Dieter","last_name":"Thoben","first_name":"Klaus-Dieter"},{"first_name":"Petra","full_name":"Wiederkehr, Petra","last_name":"Wiederkehr"}],"volume":2025,"oa":"1","date_updated":"2025-02-15T09:40:52Z","main_file_link":[{"open_access":"1"}],"doi":"10.30844/i4sd.25.1.18"},{"author":[{"first_name":"Christoph","last_name":"Breidbach","full_name":"Breidbach, Christoph"},{"first_name":"Casper Ferm","last_name":"Lars-Erik","full_name":"Lars-Erik, Casper Ferm"},{"first_name":"Paul","full_name":"Maglio, Paul","last_name":"Maglio"},{"first_name":"Daniel","last_name":"Beverungen","id":"59677","full_name":"Beverungen, Daniel"},{"first_name":"Jochen","full_name":"Wirtz, Jochen","last_name":"Wirtz"},{"first_name":"Alex","last_name":"Twigg","full_name":"Twigg, Alex"}],"date_created":"2025-09-23T11:47:47Z","date_updated":"2025-11-10T10:22:59Z","publisher":"Emerald","title":"Conscious Artificial Intelligence in Service","publication_status":"inpress","quality_controlled":"1","has_accepted_license":"1","citation":{"chicago":"Breidbach, Christoph, Casper Ferm Lars-Erik, Paul Maglio, Daniel Beverungen, Jochen Wirtz, and Alex Twigg. “Conscious Artificial Intelligence in Service.” <i>Journal of Service Management</i>, n.d.","ieee":"C. Breidbach, C. F. Lars-Erik, P. Maglio, D. Beverungen, J. Wirtz, and A. 
Twigg, “Conscious Artificial Intelligence in Service,” <i>Journal of Service Management</i>.","ama":"Breidbach C, Lars-Erik CF, Maglio P, Beverungen D, Wirtz J, Twigg A. Conscious Artificial Intelligence in Service. <i>Journal of Service Management</i>.","apa":"Breidbach, C., Lars-Erik, C. F., Maglio, P., Beverungen, D., Wirtz, J., &#38; Twigg, A. (n.d.). Conscious Artificial Intelligence in Service. <i>Journal of Service Management</i>.","short":"C. Breidbach, C.F. Lars-Erik, P. Maglio, D. Beverungen, J. Wirtz, A. Twigg, Journal of Service Management (n.d.).","bibtex":"@article{Breidbach_Lars-Erik_Maglio_Beverungen_Wirtz_Twigg, title={Conscious Artificial Intelligence in Service}, journal={Journal of Service Management}, publisher={Emerald}, author={Breidbach, Christoph and Lars-Erik, Casper Ferm and Maglio, Paul and Beverungen, Daniel and Wirtz, Jochen and Twigg, Alex} }","mla":"Breidbach, Christoph, et al. “Conscious Artificial Intelligence in Service.” <i>Journal of Service Management</i>, Emerald."},"year":"2025","user_id":"59677","department":[{"_id":"195"}],"_id":"61410","language":[{"iso":"eng"}],"file_date_updated":"2025-11-10T10:20:48Z","article_type":"original","ddc":["380"],"keyword":["AI","AI consciousness","AI ethics","service systems"],"type":"journal_article","publication":"Journal of Service Management","file":[{"creator":"dabe","date_created":"2025-11-10T10:20:48Z","date_updated":"2025-11-10T10:20:48Z","access_level":"closed","file_name":"Breidbach et al, 2025_Conscious AI in Service_w link.pdf","file_id":"62150","file_size":743479,"content_type":"application/pdf","relation":"main_file","success":1}],"status":"public","abstract":[{"lang":"eng","text":"Purpose: The purpose of this study is to identify, analyze, and explain the implications that could\r\narise for service settings if AI systems develop, or are perceived to develop, consciousness – the\r\nability to acknowledge their own existence and the capacity for positive or negative 
experiences.\r\n\r\nDesign/methodology/approach: This study proposes and explores four hypothetical scenarios in\r\nwhich conscious AI in service could manifest. We contextualize our resulting typology in the\r\nhealth service context and integrate extant literature on technology-enabled service, AI\r\nconsciousness, and AI ethics into the narrative.\r\n\r\nFindings: This study provides a unique theoretical contribution to service research in the form of\r\na Type IV theory. It enables future service researchers to apprehend, explain, and predict how\r\nfunctionally conscious AI in service might unfold.\r\n\r\nOriginality: An increasingly prolific public discourse acknowledges that conscious AI systems\r\nmay emerge. Against this backdrop, this study aims to systematically explore a question that is\r\nperhaps the most critical and timely, but also inherently speculative, in relation to AI in service\r\nresearch by introducing much-needed theory and terminology.\r\n\r\nPractical implications: The ethical use of conscious AI in service could emerge as a distinct\r\ncompetitive advantage in the future. 
Achieving this outcome involves speculative yet actionable\r\nrecommendations that include training, guiding, and controlling how humans engage with such\r\nsystems, developing appropriate wellbeing protocols for functionally conscious AI systems, and\r\nestablishing AI rights and governance frameworks."}]},{"article_number":"101419","article_type":"original","file_date_updated":"2025-12-01T21:02:20Z","_id":"61156","project":[{"name":"TRR 318; TP A01: Adaptives Erklären","_id":"111"},{"_id":"112","name":"TRR 318; TP A02: Verstehensprozess einer Erklärung beobachten und auswerten"},{"_id":"113","name":"TRR 318 - Subproject A3"},{"_id":"114","name":"TRR 318; TP A04: Integration des technischen Modells in das Partnermodell bei der Erklärung von digitalen Artefakten"},{"_id":"115","name":"TRR 318; TP A05: Echtzeitmessung der Aufmerksamkeit im Mensch-Roboter-Erklärdialog"},{"name":"TRR 318 - Subproject B3","_id":"122"},{"_id":"123","name":"TRR 318 - Subproject B5"},{"name":"TRR 318 - Project Area Ö","_id":"119"}],"department":[{"_id":"660"}],"user_id":"57578","status":"public","type":"journal_article","doi":"10.1016/j.cogsys.2025.101419","main_file_link":[{"open_access":"1","url":"https://www.sciencedirect.com/science/article/pii/S1389041725000993?via%3Dihub"}],"oa":"1","date_updated":"2025-12-05T15:32:25Z","volume":94,"author":[{"full_name":"Buschmeier, Hendrik","id":"76456","last_name":"Buschmeier","orcid":"0000-0002-9613-5713","first_name":"Hendrik"},{"first_name":"Heike M.","id":"27152","full_name":"Buhl, Heike M.","last_name":"Buhl"},{"last_name":"Kern","full_name":"Kern, Friederike","first_name":"Friederike"},{"first_name":"Angela","last_name":"Grimminger","id":"57578","full_name":"Grimminger, Angela"},{"last_name":"Beierling","full_name":"Beierling, Helen","id":"50995","first_name":"Helen"},{"id":"56345","full_name":"Fisher, Josephine Beryl","last_name":"Fisher","orcid":"0000-0002-9997-9241","first_name":"Josephine 
Beryl"},{"first_name":"André","id":"93405","full_name":"Groß, André","orcid":"0000-0002-9593-7220","last_name":"Groß"},{"first_name":"Ilona","last_name":"Horwath","id":"68836","full_name":"Horwath, Ilona"},{"id":"98454","full_name":"Klowait, Nils","last_name":"Klowait","orcid":"0000-0002-7347-099X","first_name":"Nils"},{"orcid":"0009-0009-0892-9483","last_name":"Lazarov","full_name":"Lazarov, Stefan Teodorov","id":"90345","first_name":"Stefan Teodorov"},{"first_name":"Michael","full_name":"Lenke, Michael","last_name":"Lenke"},{"last_name":"Lohmer","full_name":"Lohmer, Vivien","first_name":"Vivien"},{"last_name":"Rohlfing","orcid":"0000-0002-5676-8233","id":"50352","full_name":"Rohlfing, Katharina","first_name":"Katharina"},{"last_name":"Scharlau","orcid":"0000-0003-2364-9489","full_name":"Scharlau, Ingrid","id":"451","first_name":"Ingrid"},{"id":"91018","full_name":"Singh, Amit","last_name":"Singh","orcid":"0000-0002-7789-1521","first_name":"Amit"},{"first_name":"Lutz","last_name":"Terfloth","full_name":"Terfloth, Lutz","id":"37320"},{"first_name":"Anna-Lisa","full_name":"Vollmer, Anna-Lisa","id":"86589","last_name":"Vollmer"},{"first_name":"Yu","last_name":"Wang","full_name":"Wang, Yu"},{"first_name":"Annedore","last_name":"Wilmes","full_name":"Wilmes, Annedore"},{"last_name":"Wrede","full_name":"Wrede, Britta","first_name":"Britta"}],"intvolume":"        94","citation":{"chicago":"Buschmeier, Hendrik, Heike M. Buhl, Friederike Kern, Angela Grimminger, Helen Beierling, Josephine Beryl Fisher, André Groß, et al. “Forms of Understanding for XAI-Explanations.” <i>Cognitive Systems Research</i> 94 (2025). <a href=\"https://doi.org/10.1016/j.cogsys.2025.101419\">https://doi.org/10.1016/j.cogsys.2025.101419</a>.","ieee":"H. Buschmeier <i>et al.</i>, “Forms of Understanding for XAI-Explanations,” <i>Cognitive Systems Research</i>, vol. 94, Art. no. 
101419, 2025, doi: <a href=\"https://doi.org/10.1016/j.cogsys.2025.101419\">10.1016/j.cogsys.2025.101419</a>.","ama":"Buschmeier H, Buhl HM, Kern F, et al. Forms of Understanding for XAI-Explanations. <i>Cognitive Systems Research</i>. 2025;94. doi:<a href=\"https://doi.org/10.1016/j.cogsys.2025.101419\">10.1016/j.cogsys.2025.101419</a>","apa":"Buschmeier, H., Buhl, H. M., Kern, F., Grimminger, A., Beierling, H., Fisher, J. B., Groß, A., Horwath, I., Klowait, N., Lazarov, S. T., Lenke, M., Lohmer, V., Rohlfing, K., Scharlau, I., Singh, A., Terfloth, L., Vollmer, A.-L., Wang, Y., Wilmes, A., &#38; Wrede, B. (2025). Forms of Understanding for XAI-Explanations. <i>Cognitive Systems Research</i>, <i>94</i>, Article 101419. <a href=\"https://doi.org/10.1016/j.cogsys.2025.101419\">https://doi.org/10.1016/j.cogsys.2025.101419</a>","bibtex":"@article{Buschmeier_Buhl_Kern_Grimminger_Beierling_Fisher_Groß_Horwath_Klowait_Lazarov_et al._2025, title={Forms of Understanding for XAI-Explanations}, volume={94}, DOI={<a href=\"https://doi.org/10.1016/j.cogsys.2025.101419\">10.1016/j.cogsys.2025.101419</a>}, number={101419}, journal={Cognitive Systems Research}, author={Buschmeier, Hendrik and Buhl, Heike M. and Kern, Friederike and Grimminger, Angela and Beierling, Helen and Fisher, Josephine Beryl and Groß, André and Horwath, Ilona and Klowait, Nils and Lazarov, Stefan Teodorov and et al.}, year={2025} }","mla":"Buschmeier, Hendrik, et al. “Forms of Understanding for XAI-Explanations.” <i>Cognitive Systems Research</i>, vol. 94, 101419, 2025, doi:<a href=\"https://doi.org/10.1016/j.cogsys.2025.101419\">10.1016/j.cogsys.2025.101419</a>.","short":"H. Buschmeier, H.M. Buhl, F. Kern, A. Grimminger, H. Beierling, J.B. Fisher, A. Groß, I. Horwath, N. Klowait, S.T. Lazarov, M. Lenke, V. Lohmer, K. Rohlfing, I. Scharlau, A. Singh, L. Terfloth, A.-L. Vollmer, Y. Wang, A. Wilmes, B. 
Wrede, Cognitive Systems Research 94 (2025)."},"has_accepted_license":"1","publication_status":"published","keyword":["understanding","explaining","explanations","explainable","AI","interdisciplinarity","comprehension","enabledness","agency"],"ddc":["006"],"language":[{"iso":"eng"}],"abstract":[{"text":"Explainability has become an important topic in computer science and artificial intelligence, leading to a subfield called Explainable Artificial Intelligence (XAI). The goal of providing or seeking explanations is to achieve (better) ‘understanding’ on the part of the explainee. However, what it means to ‘understand’ is still not clearly defined, and the concept itself is rarely the subject of scientific investigation. This conceptual article aims to present a model of forms of understanding for XAI-explanations and beyond. From an interdisciplinary perspective bringing together computer science, linguistics, sociology, philosophy and psychology, a definition of understanding and its forms, assessment, and dynamics during the process of giving everyday explanations are explored. Two types of understanding are considered as possible outcomes of explanations, namely enabledness, ‘knowing how’ to do or decide something, and comprehension, ‘knowing that’ – both in different degrees (from shallow to deep). Explanations regularly start with shallow understanding in a specific domain and can lead to deep comprehension and enabledness of the explanandum, which we see as a prerequisite for human users to gain agency. In this process, the increase of comprehension and enabledness are highly interdependent. 
Against the background of this systematization, special challenges of understanding in XAI are discussed.","lang":"eng"}],"file":[{"relation":"main_file","success":1,"content_type":"application/pdf","file_id":"62730","access_level":"closed","file_name":"Buschmeier-etal-2025-COGSYS.pdf","file_size":10114981,"creator":"hbuschme","date_created":"2025-12-01T21:02:20Z","date_updated":"2025-12-01T21:02:20Z"}],"publication":"Cognitive Systems Research","title":"Forms of Understanding for XAI-Explanations","date_created":"2025-09-08T14:24:32Z","year":"2025","quality_controlled":"1"},{"publication":"SDEWES Conference 2025","type":"conference_abstract","status":"public","department":[{"_id":"876"},{"_id":"321"},{"_id":"9"},{"_id":"393"}],"user_id":"103302","_id":"63019","language":[{"iso":"eng"}],"keyword":["5GDHC","district heating","DHC","waste heat","AI-Driven"],"citation":{"bibtex":"@inproceedings{Donner_Schlüter_2025, title={Development of an AI-driven decentralized control for fifth generation district heating and cooling networks}, booktitle={SDEWES Conference 2025}, author={Donner, Johannes Aurelius Tamino and Schlüter, Alexander}, year={2025} }","mla":"Donner, Johannes Aurelius Tamino, and Alexander Schlüter. “Development of an AI-Driven Decentralized Control for Fifth Generation District Heating and Cooling Networks.” <i>SDEWES Conference 2025</i>, 2025.","short":"J.A.T. Donner, A. Schlüter, in: SDEWES Conference 2025, 2025.","apa":"Donner, J. A. T., &#38; Schlüter, A. (2025). Development of an AI-driven decentralized control for fifth generation district heating and cooling networks. <i>SDEWES Conference 2025</i>. 20th SDEWES Conference, Dubrovnik.","ieee":"J. A. T. Donner and A. Schlüter, “Development of an AI-driven decentralized control for fifth generation district heating and cooling networks,” presented at the 20th SDEWES Conference, Dubrovnik, 2025.","chicago":"Donner, Johannes Aurelius Tamino, and Alexander Schlüter. 
“Development of an AI-Driven Decentralized Control for Fifth Generation District Heating and Cooling Networks.” In <i>SDEWES Conference 2025</i>, 2025.","ama":"Donner JAT, Schlüter A. Development of an AI-driven decentralized control for fifth generation district heating and cooling networks. In: <i>SDEWES Conference 2025</i>. ; 2025."},"year":"2025","author":[{"first_name":"Johannes Aurelius Tamino","last_name":"Donner","orcid":"0009-0007-4757-4393","full_name":"Donner, Johannes Aurelius Tamino","id":"72054"},{"first_name":"Alexander","last_name":"Schlüter","orcid":"0000-0002-2569-1624","full_name":"Schlüter, Alexander","id":"103302"}],"date_created":"2025-12-10T12:30:59Z","date_updated":"2026-01-06T07:53:40Z","conference":{"start_date":"05.10.2025","name":"20th SDEWES Conference","location":"Dubrovnik","end_date":"10.10.2025"},"title":"Development of an AI-driven decentralized control for fifth generation district heating and cooling networks"},{"publication":"DS 130: Proceedings of NordDesign 2024","type":"conference","status":"public","abstract":[{"text":"Developing Intelligent Technical Systems (ITS) involves a complex process encompassing planning, analysis, design, production, and maintenance. Model-Based Systems Engineering (MBSE) is a key methodology for systematic systems engineering. Designing models for ITS requires harmonious interaction of various elements, posing a challenge in MBSE. Leveraging Generative Artificial Intelligence, we generated a dataset for modeling, using prompt engineering on large language models. 
The generated artifacts can aid engineers in MBSE design or serve as synthetic training data for AI assistants.","lang":"eng"}],"editor":[{"last_name":"Malmqvist","full_name":"Malmqvist, J.","first_name":"J."},{"last_name":"Candi","full_name":"Candi, M.","first_name":"M."},{"full_name":"Saemundsson, R.","last_name":"Saemundsson","first_name":"R."},{"first_name":"F.","full_name":"Bystrom, F.","last_name":"Bystrom"},{"first_name":"O.","full_name":"Isaksson, O.","last_name":"Isaksson"}],"user_id":"86782","_id":"56166","language":[{"iso":"eng"}],"keyword":["Data Driven Design","Design Automation","Systems Engineering (SE)","Artificial Intelligence (AI)"],"related_material":{"link":[{"url":"https://www.designsociety.org/publication/47658/Towards+Automated+Design%3A+Automatically+Generating+Modeling+Elements+with+Prompt+Engineering+and+Generative+Artificial+Intelligence","relation":"confirmation"}]},"publication_identifier":{"unknown":["978-1-912254-21-7"]},"publication_status":"epub_ahead","page":"617-625","citation":{"ieee":"P. J. Kulkarni, D. Tissen, R. Bernijazov, and R. Dumitrescu, “Towards Automated Design: Automatically Generating Modeling Elements with Prompt Engineering and Generative Artificial Intelligence,” in <i>DS 130: Proceedings of NordDesign 2024</i>, Reykjavik, 2024, pp. 617–625, doi: <a href=\"https://doi.org/10.35199/NORDDESIGN2024.66\">10.35199/NORDDESIGN2024.66</a>.","chicago":"Kulkarni, Pranav Jayant, Denis Tissen, Ruslan Bernijazov, and Roman Dumitrescu. “Towards Automated Design: Automatically Generating Modeling Elements with Prompt Engineering and Generative Artificial Intelligence.” In <i>DS 130: Proceedings of NordDesign 2024</i>, edited by J. Malmqvist, M. Candi, R. Saemundsson, F. Bystrom, and O. Isaksson, 617–25, 2024. <a href=\"https://doi.org/10.35199/NORDDESIGN2024.66\">https://doi.org/10.35199/NORDDESIGN2024.66</a>.","ama":"Kulkarni PJ, Tissen D, Bernijazov R, Dumitrescu R. 
Towards Automated Design: Automatically Generating Modeling Elements with Prompt Engineering and Generative Artificial Intelligence. In: Malmqvist J, Candi M, Saemundsson R, Bystrom F, Isaksson O, eds. <i>DS 130: Proceedings of NordDesign 2024</i>. ; 2024:617-625. doi:<a href=\"https://doi.org/10.35199/NORDDESIGN2024.66\">10.35199/NORDDESIGN2024.66</a>","bibtex":"@inproceedings{Kulkarni_Tissen_Bernijazov_Dumitrescu_2024, title={Towards Automated Design: Automatically Generating Modeling Elements with Prompt Engineering and Generative Artificial Intelligence}, DOI={<a href=\"https://doi.org/10.35199/NORDDESIGN2024.66\">10.35199/NORDDESIGN2024.66</a>}, booktitle={DS 130: Proceedings of NordDesign 2024}, author={Kulkarni, Pranav Jayant and Tissen, Denis and Bernijazov, Ruslan and Dumitrescu, Roman}, editor={Malmqvist, J. and Candi, M. and Saemundsson, R. and Bystrom, F. and Isaksson, O.}, year={2024}, pages={617–625} }","mla":"Kulkarni, Pranav Jayant, et al. “Towards Automated Design: Automatically Generating Modeling Elements with Prompt Engineering and Generative Artificial Intelligence.” <i>DS 130: Proceedings of NordDesign 2024</i>, edited by J. Malmqvist et al., 2024, pp. 617–25, doi:<a href=\"https://doi.org/10.35199/NORDDESIGN2024.66\">10.35199/NORDDESIGN2024.66</a>.","short":"P.J. Kulkarni, D. Tissen, R. Bernijazov, R. Dumitrescu, in: J. Malmqvist, M. Candi, R. Saemundsson, F. Bystrom, O. Isaksson (Eds.), DS 130: Proceedings of NordDesign 2024, 2024, pp. 617–625.","apa":"Kulkarni, P. J., Tissen, D., Bernijazov, R., &#38; Dumitrescu, R. (2024). Towards Automated Design: Automatically Generating Modeling Elements with Prompt Engineering and Generative Artificial Intelligence. In J. Malmqvist, M. Candi, R. Saemundsson, F. Bystrom, &#38; O. Isaksson (Eds.), <i>DS 130: Proceedings of NordDesign 2024</i> (pp. 617–625). 
<a href=\"https://doi.org/10.35199/NORDDESIGN2024.66\">https://doi.org/10.35199/NORDDESIGN2024.66</a>"},"year":"2024","author":[{"first_name":"Pranav Jayant","id":"86782","full_name":"Kulkarni, Pranav Jayant","last_name":"Kulkarni"},{"full_name":"Tissen, Denis","id":"44458","last_name":"Tissen","first_name":"Denis"},{"first_name":"Ruslan","last_name":"Bernijazov","full_name":"Bernijazov, Ruslan","id":"36312"},{"first_name":"Roman","last_name":"Dumitrescu","id":"16190","full_name":"Dumitrescu, Roman"}],"date_created":"2024-09-17T09:56:43Z","date_updated":"2024-09-17T09:57:07Z","conference":{"name":"NordDesign Conference 2024","start_date":"2024-08-12","end_date":"2024-08-14","location":"Reykjavik"},"doi":"10.35199/NORDDESIGN2024.66","title":"Towards Automated Design: Automatically Generating Modeling Elements with Prompt Engineering and Generative Artificial Intelligence"},{"_id":"56277","user_id":"47857","keyword":["AI","argumentation mining","discourse history","(automated","learner-sensitive) feedback"],"language":[{"iso":"eng"}],"type":"conference","publication":"Proceedings of the Tenth Conference of the International Society for the Study of Argumentation","abstract":[{"lang":"eng","text":"What is learner-sensitive feedback to argumentative learner texts when it is to be issued computer- based? Learning stages are difficult to quantify. The paper provides insight into the history of research since the 1980s and a preview of what this automated feedback might look like. 
These questions are embedded in a research project at the Universities of Paderborn and Hannover, Germany, from which a software (project name ArgSchool) emerges that will provide such feedback."}],"status":"public","date_updated":"2024-09-30T09:25:14Z","author":[{"last_name":"Kilsbach","id":"93839","full_name":"Kilsbach, Sebastian","first_name":"Sebastian"},{"full_name":"Michel, Nadine","id":"47857","last_name":"Michel","first_name":"Nadine"}],"date_created":"2024-09-30T09:24:12Z","title":"Computer-Based Generation of Learner-Sensitive Feedback to Argumentative Learner Texts","conference":{"location":"Leiden","end_date":"2023-07-07","start_date":"2023-07-04","name":"Tenth Conference of the International Society for the Study of Argumentation"},"year":"2024","citation":{"ama":"Kilsbach S, Michel N. Computer-Based Generation of Learner-Sensitive Feedback to Argumentative Learner Texts. In: <i>Proceedings of the Tenth Conference of the International Society for the Study of Argumentation</i>. ; 2024.","chicago":"Kilsbach, Sebastian, and Nadine Michel. “Computer-Based Generation of Learner-Sensitive Feedback to Argumentative Learner Texts.” In <i>Proceedings of the Tenth Conference of the International Society for the Study of Argumentation</i>, 2024.","ieee":"S. Kilsbach and N. Michel, “Computer-Based Generation of Learner-Sensitive Feedback to Argumentative Learner Texts,” presented at the Tenth Conference of the International Society for the Study of Argumentation, Leiden, 2024.","apa":"Kilsbach, S., &#38; Michel, N. (2024). Computer-Based Generation of Learner-Sensitive Feedback to Argumentative Learner Texts. <i>Proceedings of the Tenth Conference of the International Society for the Study of Argumentation</i>. 
Tenth Conference of the International Society for the Study of Argumentation, Leiden.","bibtex":"@inproceedings{Kilsbach_Michel_2024, title={Computer-Based Generation of Learner-Sensitive Feedback to Argumentative Learner Texts}, booktitle={Proceedings of the Tenth Conference of the International Society for the Study of Argumentation}, author={Kilsbach, Sebastian and Michel, Nadine}, year={2024} }","short":"S. Kilsbach, N. Michel, in: Proceedings of the Tenth Conference of the International Society for the Study of Argumentation, 2024.","mla":"Kilsbach, Sebastian, and Nadine Michel. “Computer-Based Generation of Learner-Sensitive Feedback to Argumentative Learner Texts.” <i>Proceedings of the Tenth Conference of the International Society for the Study of Argumentation</i>, 2024."}},{"citation":{"apa":"Bhila, I., &#38; Bode, I. (2024). <i>The problem of algorithmic bias in AI-based military decision support systems</i>. ICRC Humanitarian Law &#38; Policy Blog.","mla":"Bhila, Ishmael, and Ingvild Bode. <i>The Problem of Algorithmic Bias in AI-Based Military Decision Support Systems</i>. ICRC Humanitarian Law &#38; Policy Blog, 2024.","bibtex":"@book{Bhila_Bode_2024, title={The problem of algorithmic bias in AI-based military decision support systems}, publisher={ICRC Humanitarian Law &#38; Policy Blog}, author={Bhila, Ishmael and Bode, Ingvild}, year={2024} }","short":"I. Bhila, I. Bode, The Problem of Algorithmic Bias in AI-Based Military Decision Support Systems, ICRC Humanitarian Law &#38; Policy Blog, 2024.","chicago":"Bhila, Ishmael, and Ingvild Bode. <i>The Problem of Algorithmic Bias in AI-Based Military Decision Support Systems</i>. ICRC Humanitarian Law &#38; Policy Blog, 2024.","ieee":"I. Bhila and I. Bode, <i>The problem of algorithmic bias in AI-based military decision support systems</i>. ICRC Humanitarian Law &#38; Policy Blog, 2024.","ama":"Bhila I, Bode I. <i>The Problem of Algorithmic Bias in AI-Based Military Decision Support Systems</i>. 
ICRC Humanitarian Law &#38; Policy Blog; 2024."},"year":"2024","related_material":{"link":[{"relation":"confirmation","url":"https://blogs.icrc.org/law-and-policy/2024/09/03/the-problem-of-algorithmic-bias-in-ai-based-military-decision-support-systems/"}]},"has_accepted_license":"1","publication_status":"published","main_file_link":[{"url":"https://blogs.icrc.org/law-and-policy/2024/09/03/the-problem-of-algorithmic-bias-in-ai-based-military-decision-support-systems/","open_access":"1"}],"title":"The problem of algorithmic bias in AI-based military decision support systems","author":[{"id":"105772","full_name":"Bhila, Ishmael","last_name":"Bhila","first_name":"Ishmael"},{"first_name":"Ingvild","last_name":"Bode","full_name":"Bode, Ingvild"}],"date_created":"2024-09-30T11:44:28Z","publisher":"ICRC Humanitarian Law & Policy Blog","oa":"1","date_updated":"2024-11-26T09:49:48Z","status":"public","abstract":[{"lang":"eng","text":"Algorithmic bias has long been recognized as a key problem affecting decision-making processes that integrate artificial intelligence (AI) technologies. The increased use of AI in making military decisions relevant to the use of force has sustained such questions about biases in these technologies and in how human users programme with and rely on data based on hierarchized socio-cultural norms, knowledges, and modes of attention.\r\n\r\nIn this post, Dr Ingvild Bode, Professor at the Center for War Studies, University of Southern Denmark, and Ishmael Bhila, PhD researcher at the “Meaningful Human Control: Between Regulation and Reflexion” project, Paderborn University, unpack the problem of algorithmic bias with reference to AI-based decision support systems (AI DSS). 
They examine three categories of algorithmic bias – preexisting bias, technical bias, and emergent bias – across four lifecycle stages of an AI DSS, concluding that stakeholders in the ongoing discussion about AI in the military domain should consider the impact of algorithmic bias on AI DSS more seriously."}],"type":"misc","language":[{"iso":"eng"}],"keyword":["Algorithmic Bias","AI","Decision Support Systems","Autonomous Weapons Systems"],"user_id":"105772","_id":"56282"},{"issue":"3","citation":{"apa":"Esposito, E. (2023). Does Explainability Require Transparency? <i>Sociologica</i>, <i>16</i>(3), 17–27. <a href=\"https://doi.org/10.6092/ISSN.1971-8853/15804\">https://doi.org/10.6092/ISSN.1971-8853/15804</a>","short":"E. Esposito, Sociologica 16 (2023) 17–27.","mla":"Esposito, Elena. “Does Explainability Require Transparency?” <i>Sociologica</i>, vol. 16, no. 3, 2023, pp. 17–27, doi:<a href=\"https://doi.org/10.6092/ISSN.1971-8853/15804\">10.6092/ISSN.1971-8853/15804</a>.","bibtex":"@article{Esposito_2023, title={Does Explainability Require Transparency?}, volume={16}, DOI={<a href=\"https://doi.org/10.6092/ISSN.1971-8853/15804\">10.6092/ISSN.1971-8853/15804</a>}, number={3}, journal={Sociologica}, author={Esposito, Elena }, year={2023}, pages={17–27} }","ama":"Esposito E. Does Explainability Require Transparency? <i>Sociologica</i>. 2023;16(3):17-27. doi:<a href=\"https://doi.org/10.6092/ISSN.1971-8853/15804\">10.6092/ISSN.1971-8853/15804</a>","chicago":"Esposito, Elena . “Does Explainability Require Transparency?” <i>Sociologica</i> 16, no. 3 (2023): 17–27. <a href=\"https://doi.org/10.6092/ISSN.1971-8853/15804\">https://doi.org/10.6092/ISSN.1971-8853/15804</a>.","ieee":"E. Esposito, “Does Explainability Require Transparency?,” <i>Sociologica</i>, vol. 16, no. 3, pp. 
17–27, 2023, doi: <a href=\"https://doi.org/10.6092/ISSN.1971-8853/15804\">10.6092/ISSN.1971-8853/15804</a>."},"page":"17-27","intvolume":"        16","year":"2023","date_created":"2024-02-18T10:16:43Z","author":[{"first_name":"Elena ","full_name":"Esposito, Elena ","last_name":"Esposito"}],"volume":16,"date_updated":"2024-02-26T08:46:26Z","doi":"10.6092/ISSN.1971-8853/15804","title":"Does Explainability Require Transparency?","type":"journal_article","publication":"Sociologica","status":"public","abstract":[{"text":"Dealing with opaque algorithms, the frequent overlap between transparency and explainability produces seemingly unsolvable dilemmas, as the much-discussed trade-off between model performance and model transparency. Referring to Niklas Luhmann's notion of communication, the paper argues that explainability does not necessarily require transparency and proposes an alternative approach. Explanations as communicative processes do not imply any disclosure of thoughts or neural processes, but only reformulations that provide the partners with additional elements and enable them to understand (from their perspective) what has been done and why. Recent computational approaches aiming at post-hoc explainability reproduce what happens in communication, producing explanations of the working of algorithms that can be different from the processes of the algorithms.","lang":"eng"}],"user_id":"54779","department":[{"_id":"660"}],"project":[{"_id":"121","name":"TRR 318 - B01: TRR 318 - Ein dialogbasierter Ansatz zur Erklärung von Modellen des maschinellen Lernens (Teilprojekt B01)","grant_number":"438445824"}],"_id":"51368","language":[{"iso":"eng"}],"keyword":["Explainable AI","Transparency","Explanation","Communication","Sociological systems theory"]},{"status":"public","abstract":[{"lang":"eng","text":"This short introduction presents the symposium ‘Explaining Machines’. 
It locates the debate about Explainable AI in the history of the reflection about AI and outlines the issues discussed in the contributions."}],"publication":"Sociologica","type":"journal_article","language":[{"iso":"eng"}],"keyword":["Explainable AI","Inexplicability","Transparency","Explanation","Opacity","Contestability"],"department":[{"_id":"660"}],"user_id":"54779","_id":"51369","project":[{"_id":"121","name":"TRR 318 - B01: TRR 318 - Ein dialogbasierter Ansatz zur Erklärung von Modellen des maschinellen Lernens (Teilprojekt B01)","grant_number":"438445824"}],"page":"1-4","intvolume":"        16","citation":{"ieee":"E. Esposito, “Explaining Machines: Social Management of Incomprehensible Algorithms. Introduction,” <i>Sociologica</i>, vol. 16, no. 3, pp. 1–4, 2023, doi: <a href=\"https://doi.org/10.6092/ISSN.1971-8853/16265\">10.6092/ISSN.1971-8853/16265</a>.","chicago":"Esposito, Elena. “Explaining Machines: Social Management of Incomprehensible Algorithms. Introduction.” <i>Sociologica</i> 16, no. 3 (2023): 1–4. <a href=\"https://doi.org/10.6092/ISSN.1971-8853/16265\">https://doi.org/10.6092/ISSN.1971-8853/16265</a>.","ama":"Esposito E. Explaining Machines: Social Management of Incomprehensible Algorithms. Introduction. <i>Sociologica</i>. 2023;16(3):1-4. doi:<a href=\"https://doi.org/10.6092/ISSN.1971-8853/16265\">10.6092/ISSN.1971-8853/16265</a>","mla":"Esposito, Elena. “Explaining Machines: Social Management of Incomprehensible Algorithms. Introduction.” <i>Sociologica</i>, vol. 16, no. 3, 2023, pp. 1–4, doi:<a href=\"https://doi.org/10.6092/ISSN.1971-8853/16265\">10.6092/ISSN.1971-8853/16265</a>.","short":"E. Esposito, Sociologica 16 (2023) 1–4.","bibtex":"@article{Esposito_2023, title={Explaining Machines: Social Management of Incomprehensible Algorithms. 
Introduction}, volume={16}, DOI={<a href=\"https://doi.org/10.6092/ISSN.1971-8853/16265\">10.6092/ISSN.1971-8853/16265</a>}, number={3}, journal={Sociologica}, author={Esposito, Elena}, year={2023}, pages={1–4} }","apa":"Esposito, E. (2023). Explaining Machines: Social Management of Incomprehensible Algorithms. Introduction. <i>Sociologica</i>, <i>16</i>(3), 1–4. <a href=\"https://doi.org/10.6092/ISSN.1971-8853/16265\">https://doi.org/10.6092/ISSN.1971-8853/16265</a>"},"year":"2023","issue":"3","doi":"10.6092/ISSN.1971-8853/16265","title":"Explaining Machines: Social Management of Incomprehensible Algorithms. Introduction","volume":16,"date_created":"2024-02-18T10:23:23Z","author":[{"first_name":"Elena","last_name":"Esposito","full_name":"Esposito, Elena"}],"date_updated":"2024-02-26T08:45:56Z"},{"type":"conference","publication":"Proceedings of the Annual Hawaii International Conference on System Sciences (HICSS)","abstract":[{"lang":"eng","text":"Algorithmic fairness in Information Systems (IS) is a concept that aims to mitigate systematic discrimination and bias in automated decision-making. However, previous research argued that different fairness criteria are often incompatible. In hiring, AI is used to assess and rank applicants according to their fit for vacant positions. However, various types of bias also exist for AI-based algorithms (e.g., using biased historical data). To reduce AI’s bias and thereby unfair treatment, we conducted a systematic literature review to identify suitable strategies for the context of hiring. We identified nine fundamental articles in this context and extracted four types of approaches to address unfairness in AI, namely pre-process, in-process, post-process, and feature selection. 
Based on our findings, we (a) derived a research agenda for future studies and (b) proposed strategies for practitioners who design and develop AIs for hiring purposes."}],"status":"public","_id":"33490","user_id":"77643","keyword":["fairness in AI","SLR","hiring","AI implementation","AI-based algorithms"],"language":[{"iso":"eng"}],"year":"2023","citation":{"ieee":"J. Rieskamp, L. Hofeditz, M. Mirbabaie, and S. Stieglitz, “Approaches to Improve Fairness when Deploying AI-based Algorithms in Hiring – Using a Systematic Literature Review to Guide Future Research,” presented at the Proceedings of the Annual Hawaii International Conference on System Sciences (HICSS), 2023.","chicago":"Rieskamp, Jonas, Lennart Hofeditz, Milad Mirbabaie, and Stefan Stieglitz. “Approaches to Improve Fairness When Deploying AI-Based Algorithms in Hiring – Using a Systematic Literature Review to Guide Future Research.” In <i>Proceedings of the Annual Hawaii International Conference on System Sciences (HICSS)</i>, 2023.","ama":"Rieskamp J, Hofeditz L, Mirbabaie M, Stieglitz S. Approaches to Improve Fairness when Deploying AI-based Algorithms in Hiring – Using a Systematic Literature Review to Guide Future Research. In: <i>Proceedings of the Annual Hawaii International Conference on System Sciences (HICSS)</i>. ; 2023.","short":"J. Rieskamp, L. Hofeditz, M. Mirbabaie, S. Stieglitz, in: Proceedings of the Annual Hawaii International Conference on System Sciences (HICSS), 2023.","bibtex":"@inproceedings{Rieskamp_Hofeditz_Mirbabaie_Stieglitz_2023, title={Approaches to Improve Fairness when Deploying AI-based Algorithms in Hiring – Using a Systematic Literature Review to Guide Future Research}, booktitle={Proceedings of the Annual Hawaii International Conference on System Sciences (HICSS)}, author={Rieskamp, Jonas and Hofeditz, Lennart and Mirbabaie, Milad and Stieglitz, Stefan}, year={2023} }","mla":"Rieskamp, Jonas, et al. 
“Approaches to Improve Fairness When Deploying AI-Based Algorithms in Hiring – Using a Systematic Literature Review to Guide Future Research.” <i>Proceedings of the Annual Hawaii International Conference on System Sciences (HICSS)</i>, 2023.","apa":"Rieskamp, J., Hofeditz, L., Mirbabaie, M., &#38; Stieglitz, S. (2023). Approaches to Improve Fairness when Deploying AI-based Algorithms in Hiring – Using a Systematic Literature Review to Guide Future Research. <i>Proceedings of the Annual Hawaii International Conference on System Sciences (HICSS)</i>. Proceedings of the Annual Hawaii International Conference on System Sciences (HICSS)."},"date_updated":"2023-02-06T14:39:51Z","author":[{"first_name":"Jonas","last_name":"Rieskamp","full_name":"Rieskamp, Jonas","id":"77643"},{"last_name":"Hofeditz","full_name":"Hofeditz, Lennart","first_name":"Lennart"},{"id":"88691","full_name":"Mirbabaie, Milad","last_name":"Mirbabaie","first_name":"Milad"},{"full_name":"Stieglitz, Stefan","last_name":"Stieglitz","first_name":"Stefan"}],"date_created":"2022-09-27T12:39:12Z","title":"Approaches to Improve Fairness when Deploying AI-based Algorithms in Hiring – Using a Systematic Literature Review to Guide Future Research","main_file_link":[{"url":"https://hdl.handle.net/10125/102654"}],"conference":{"name":"Proceedings of the Annual Hawaii International Conference on System Sciences (HICSS)","start_date":"2023-01-03","end_date":"2023-01-06"}},{"department":[{"_id":"195"},{"_id":"196"}],"user_id":"77066","_id":"45299","language":[{"iso":"eng"}],"keyword":["Explainable AI (XAI)","machine learning","interpretability","real estate appraisal","framework","taxonomy"],"publication":"Journal of Decision Systems","type":"journal_article","status":"public","abstract":[{"lang":"eng","text":"Many applications are driven by Machine Learning (ML) today. While complex ML models lead to an accurate prediction, their inner decision-making is obfuscated. 
However, especially for high-stakes decisions, interpretability and explainability of the model are necessary. Therefore, we develop a holistic interpretability and explainability framework (HIEF) to objectively describe and evaluate an intelligent system’s explainable AI (XAI) capacities. This guides data scientists to create more transparent models. To evaluate our framework, we analyse 50 real estate appraisal papers to ensure the robustness of HIEF. Additionally, we identify six typical types of intelligent systems, so-called archetypes, which range from explanatory to predictive, and demonstrate how researchers can use the framework to identify blind-spot topics in their domain. Finally, regarding comprehensiveness, we used a random sample of six intelligent systems and conducted an applicability check to provide external validity."}],"author":[{"full_name":"Kucklick, Jan-Peter","id":"77066","last_name":"Kucklick","first_name":"Jan-Peter"}],"date_created":"2023-05-26T05:04:45Z","date_updated":"2023-05-26T05:08:36Z","publisher":"Taylor & Francis","doi":"10.1080/12460125.2023.2207268","main_file_link":[{"url":"https://www.tandfonline.com/doi/full/10.1080/12460125.2023.2207268"}],"title":"HIEF: a holistic interpretability and explainability framework","publication_identifier":{"issn":["1246-0125","2116-7052"]},"publication_status":"published","page":"1-41","citation":{"ieee":"J.-P. Kucklick, “HIEF: a holistic interpretability and explainability framework,” <i>Journal of Decision Systems</i>, pp. 1–41, 2023, doi: <a href=\"https://doi.org/10.1080/12460125.2023.2207268\">10.1080/12460125.2023.2207268</a>.","chicago":"Kucklick, Jan-Peter. “HIEF: A Holistic Interpretability and Explainability Framework.” <i>Journal of Decision Systems</i>, 2023, 1–41. <a href=\"https://doi.org/10.1080/12460125.2023.2207268\">https://doi.org/10.1080/12460125.2023.2207268</a>.","ama":"Kucklick J-P. HIEF: a holistic interpretability and explainability framework. 
<i>Journal of Decision Systems</i>. Published online 2023:1-41. doi:<a href=\"https://doi.org/10.1080/12460125.2023.2207268\">10.1080/12460125.2023.2207268</a>","short":"J.-P. Kucklick, Journal of Decision Systems (2023) 1–41.","bibtex":"@article{Kucklick_2023, title={HIEF: a holistic interpretability and explainability framework}, DOI={<a href=\"https://doi.org/10.1080/12460125.2023.2207268\">10.1080/12460125.2023.2207268</a>}, journal={Journal of Decision Systems}, publisher={Taylor &#38; Francis}, author={Kucklick, Jan-Peter}, year={2023}, pages={1–41} }","mla":"Kucklick, Jan-Peter. “HIEF: A Holistic Interpretability and Explainability Framework.” <i>Journal of Decision Systems</i>, Taylor &#38; Francis, 2023, pp. 1–41, doi:<a href=\"https://doi.org/10.1080/12460125.2023.2207268\">10.1080/12460125.2023.2207268</a>.","apa":"Kucklick, J.-P. (2023). HIEF: a holistic interpretability and explainability framework. <i>Journal of Decision Systems</i>, 1–41. <a href=\"https://doi.org/10.1080/12460125.2023.2207268\">https://doi.org/10.1080/12460125.2023.2207268</a>"},"year":"2023"},{"citation":{"mla":"Liedeker, Felix, and Philipp Cimiano. <i>A Prototype of an Interactive Clinical Decision Support System with Counterfactual Explanations</i>. 2023.","short":"F. Liedeker, P. Cimiano, in: 2023.","bibtex":"@inproceedings{Liedeker_Cimiano_2023, title={A Prototype of an Interactive Clinical Decision Support System with Counterfactual Explanations}, author={Liedeker, Felix and Cimiano, Philipp}, year={2023} }","apa":"Liedeker, F., &#38; Cimiano, P. (2023). <i>A Prototype of an Interactive Clinical Decision Support System with Counterfactual Explanations</i>. xAI-2023 Late-breaking Work, Demos and Doctoral Consortium co-located with the 1st World Conference on eXplainable Artificial Intelligence (xAI-2023), Lissabon.","ama":"Liedeker F, Cimiano P. A Prototype of an Interactive Clinical Decision Support System with Counterfactual Explanations. 
In: ; 2023.","chicago":"Liedeker, Felix, and Philipp Cimiano. “A Prototype of an Interactive Clinical Decision Support System with Counterfactual Explanations,” 2023.","ieee":"F. Liedeker and P. Cimiano, “A Prototype of an Interactive Clinical Decision Support System with Counterfactual Explanations,” presented at the xAI-2023 Late-breaking Work, Demos and Doctoral Consortium co-located with the 1st World Conference on eXplainable Artificial Intelligence (xAI-2023), Lissabon, 2023."},"year":"2023","conference":{"end_date":"2023-07-28","location":"Lissabon","name":"xAI-2023 Late-breaking Work, Demos and Doctoral Consortium co-located with the 1st World Conference on eXplainable Artificial Intelligence (xAI-2023)","start_date":"2023-07-26"},"title":"A Prototype of an Interactive Clinical Decision Support System with Counterfactual Explanations","author":[{"last_name":"Liedeker","id":"93275","full_name":"Liedeker, Felix","first_name":"Felix"},{"full_name":"Cimiano, Philipp","last_name":"Cimiano","first_name":"Philipp"}],"date_created":"2024-10-09T14:50:09Z","date_updated":"2024-10-09T15:04:53Z","status":"public","abstract":[{"lang":"eng","text":"We describe a prototype of a Clinical Decision Support System (CDSS) that provides (counterfactual) explanations to support accurate medical diagnosis. The prototype is based on an inherently interpretable Bayesian network (BN). 
Our research aims to investigate which explanations are most useful for medical experts and whether co-constructing explanations can foster trust and acceptance of CDSS."}],"type":"conference","language":[{"iso":"eng"}],"keyword":["Explainable AI","Clinical decision support","Bayesian network","Counterfactual explanations"],"department":[{"_id":"660"}],"user_id":"93275","_id":"56477","project":[{"name":"TRR 318 - C5: TRR 318 - Subproject C5","_id":"128"}]},{"publication":"IEEE/IFIP Network Operations and Management Symposium (NOMS)","file":[{"file_size":528653,"file_id":"29222","access_level":"open_access","file_name":"author_version.pdf","date_updated":"2022-01-11T08:39:57Z","date_created":"2022-01-11T08:39:57Z","creator":"stschn","relation":"main_file","content_type":"application/pdf"}],"abstract":[{"lang":"eng","text":"Modern services often comprise several components, such as chained virtual network functions, microservices, or\r\nmachine learning functions. Providing such services requires to decide how often to instantiate each component, where to place these instances in the network, how to chain them and route traffic through them. \r\nTo overcome limitations of conventional, hardwired heuristics, deep reinforcement learning (DRL) approaches for self-learning network and service management have emerged recently. These model-free DRL approaches are more flexible but typically learn tabula rasa, i.e., disregard existing understanding of networks, services, and their coordination. \r\n\r\nInstead, we propose FutureCoord, a novel model-based AI approach that leverages existing understanding of networks and services for more efficient and effective coordination without time-intensive training. FutureCoord combines Monte Carlo Tree Search with a stochastic traffic model. 
This allows FutureCoord to estimate the impact of future incoming traffic and effectively optimize long-term effects, taking fluctuating demand and Quality of Service (QoS) requirements into account. Our extensive evaluation based on real-world network topologies, services, and traffic traces indicates that FutureCoord clearly outperforms state-of-the-art model-free and model-based approaches with up to 51% higher flow success ratios."}],"language":[{"iso":"eng"}],"ddc":["004"],"keyword":["network management","service management","AI","Monte Carlo Tree Search","model-based","QoS"],"quality_controlled":"1","year":"2022","date_created":"2022-01-11T08:43:26Z","publisher":"IEEE","title":"Use What You Know: Network and Service Coordination Beyond Certainty","type":"conference","status":"public","user_id":"35343","department":[{"_id":"75"}],"project":[{"_id":"1","name":"SFB 901: SFB 901"},{"name":"SFB 901 - C: SFB 901 - Project Area C","_id":"4"},{"_id":"16","name":"SFB 901 - C4: SFB 901 - Subproject C4"}],"_id":"29220","file_date_updated":"2022-01-11T08:39:57Z","has_accepted_license":"1","citation":{"short":"S. Werner, S.B. Schneider, H. Karl, in: IEEE/IFIP Network Operations and Management Symposium (NOMS), IEEE, 2022.","bibtex":"@inproceedings{Werner_Schneider_Karl_2022, title={Use What You Know: Network and Service Coordination Beyond Certainty}, booktitle={IEEE/IFIP Network Operations and Management Symposium (NOMS)}, publisher={IEEE}, author={Werner, Stefan and Schneider, Stefan Balthasar and Karl, Holger}, year={2022} }","mla":"Werner, Stefan, et al. “Use What You Know: Network and Service Coordination Beyond Certainty.” <i>IEEE/IFIP Network Operations and Management Symposium (NOMS)</i>, IEEE, 2022.","apa":"Werner, S., Schneider, S. B., &#38; Karl, H. (2022). Use What You Know: Network and Service Coordination Beyond Certainty. <i>IEEE/IFIP Network Operations and Management Symposium (NOMS)</i>. 
IEEE/IFIP Network Operations and Management Symposium (NOMS), Budapest.","ama":"Werner S, Schneider SB, Karl H. Use What You Know: Network and Service Coordination Beyond Certainty. In: <i>IEEE/IFIP Network Operations and Management Symposium (NOMS)</i>. IEEE; 2022.","chicago":"Werner, Stefan, Stefan Balthasar Schneider, and Holger Karl. “Use What You Know: Network and Service Coordination Beyond Certainty.” In <i>IEEE/IFIP Network Operations and Management Symposium (NOMS)</i>. IEEE, 2022.","ieee":"S. Werner, S. B. Schneider, and H. Karl, “Use What You Know: Network and Service Coordination Beyond Certainty,” presented at the IEEE/IFIP Network Operations and Management Symposium (NOMS), Budapest, 2022."},"author":[{"first_name":"Stefan","full_name":"Werner, Stefan","last_name":"Werner"},{"first_name":"Stefan Balthasar","full_name":"Schneider, Stefan Balthasar","id":"35343","last_name":"Schneider","orcid":"0000-0001-8210-4011"},{"first_name":"Holger","full_name":"Karl, Holger","id":"126","last_name":"Karl"}],"date_updated":"2022-01-11T08:44:04Z","oa":"1","conference":{"location":"Budapest","end_date":"2022-04-29","start_date":"2022-04-25","name":"IEEE/IFIP Network Operations and Management Symposium (NOMS)"}},{"keyword":["Artificial Intelligence","Change Management","Resistance","AI-Driven Change","AI Deployment","AI Perception"],"language":[{"iso":"eng"}],"_id":"37155","user_id":"80546","abstract":[{"lang":"eng","text":"Artificial intelligence (AI) has moved beyond the planning phase in many organisations and it is often accompanied by uncertainties and fears of job loss among employees. It is crucial to manage employees’ attitudes towards the deployment of an AI-based technology effectively and counteract possible resistance behaviour. We present lessons learned from an industry case where we conducted interviews with affected employees. 
We evaluated our results with managers across industries and found that the deployment of AI-based technologies does not differ from other IT, but that the change is perceived differently due to misguided expectations. "}],"status":"public","type":"journal_article","publication":"International Journal of Management Practice","title":"Recommendations for Managing AI-Driven Change Processes: When Expectations Meet Reality","date_updated":"2023-01-18T07:59:08Z","publisher":"Inderscience","date_created":"2023-01-17T15:37:55Z","author":[{"first_name":"Stefan","full_name":"Stieglitz, Stefan","last_name":"Stieglitz"},{"first_name":"Nicholas R. J.","last_name":"Möllmann (Frick)","full_name":"Möllmann (Frick), Nicholas R. J."},{"full_name":"Mirbabaie, Milad","id":"88691","last_name":"Mirbabaie","first_name":"Milad"},{"full_name":"Hofeditz, Lennart","last_name":"Hofeditz","first_name":"Lennart"},{"first_name":"Björn","last_name":"Ross","full_name":"Ross, Björn"}],"year":"2021","citation":{"chicago":"Stieglitz, Stefan, Nicholas R. J. Möllmann (Frick), Milad Mirbabaie, Lennart Hofeditz, and Björn Ross. “Recommendations for Managing AI-Driven Change Processes: When Expectations Meet Reality.” <i>International Journal of Management Practice</i>, 2021.","ieee":"S. Stieglitz, N. R. J. Möllmann (Frick), M. Mirbabaie, L. Hofeditz, and B. Ross, “Recommendations for Managing AI-Driven Change Processes: When Expectations Meet Reality,” <i>International Journal of Management Practice</i>, 2021.","ama":"Stieglitz S, Möllmann (Frick) NRJ, Mirbabaie M, Hofeditz L, Ross B. Recommendations for Managing AI-Driven Change Processes: When Expectations Meet Reality. <i>International Journal of Management Practice</i>. Published online 2021.","mla":"Stieglitz, Stefan, et al. “Recommendations for Managing AI-Driven Change Processes: When Expectations Meet Reality.” <i>International Journal of Management Practice</i>, Inderscience, 2021.","short":"S. Stieglitz, N.R.J. Möllmann (Frick), M. 
Mirbabaie, L. Hofeditz, B. Ross, International Journal of Management Practice (2021).","bibtex":"@article{Stieglitz_Möllmann (Frick)_Mirbabaie_Hofeditz_Ross_2021, title={Recommendations for Managing AI-Driven Change Processes: When Expectations Meet Reality}, journal={International Journal of Management Practice}, publisher={Inderscience}, author={Stieglitz, Stefan and Möllmann (Frick), Nicholas R. J. and Mirbabaie, Milad and Hofeditz, Lennart and Ross, Björn}, year={2021} }","apa":"Stieglitz, S., Möllmann (Frick), N. R. J., Mirbabaie, M., Hofeditz, L., &#38; Ross, B. (2021). Recommendations for Managing AI-Driven Change Processes: When Expectations Meet Reality. <i>International Journal of Management Practice</i>."},"publication_identifier":{"issn":["1477-9064"]}}]
