[{"type":"journal_article","publication":"Cognitive Systems Research","status":"public","abstract":[{"text":"A current concern in the field of Artificial Intelligence (AI) is to ensure the trustworthiness of AI systems. The development of explainability methods is one prominent way to address this, which has often resulted in the assumption that the use of explainability will lead to an increase in the trust of users and wider society. However, the dynamics between explainability and trust are not well established and empirical investigations of their relation remain mixed or inconclusive.\r\nIn this paper we provide a detailed description of the concepts of user trust and distrust in AI and their relation to appropriate reliance. For that we draw from the fields of machine learning, human–computer interaction, and the social sciences. Based on these insights, we have created a focused study of empirical literature of existing empirical studies that investigate the effects of AI systems and XAI methods on user (dis)trust, in order to substantiate our conceptualization of trust, distrust, and reliance. With respect to our conceptual understanding we identify gaps in existing empirical work. With clarifying the concepts and summarizing the empirical studies, we aim to provide researchers, who examine user trust in AI, with an improved starting point for developing user studies to measure and evaluate the user’s attitude towards and reliance on AI systems.","lang":"eng"}],"user_id":"92810","department":[{"_id":"424"},{"_id":"660"}],"project":[{"_id":"124","name":"TRR 318 - C1: TRR 318 - Subproject C1 - Gesundes Misstrauen in Erklärungen"}],"_id":"59756","language":[{"iso":"eng"}],"article_number":"101357","keyword":["XAI","Appropriate trust","Distrust","Reliance","Human-centric evaluation","Trustworthy AI"],"publication_status":"published","publication_identifier":{"issn":["1389-0417"]},"citation":{"short":"R. Visser, T.M. Peters, I. Scharlau, B. Hammer, Cognitive Systems Research (2025).","bibtex":"@article{Visser_Peters_Scharlau_Hammer_2025, title={Trust, distrust, and appropriate reliance in (X)AI: A conceptual clarification of user trust and survey of its empirical evaluation}, DOI={<a href=\"https://doi.org/10.1016/j.cogsys.2025.101357\">10.1016/j.cogsys.2025.101357</a>}, number={101357}, journal={Cognitive Systems Research}, publisher={Elsevier BV}, author={Visser, Roel and Peters, Tobias Martin and Scharlau, Ingrid and Hammer, Barbara}, year={2025} }","mla":"Visser, Roel, et al. “Trust, Distrust, and Appropriate Reliance in (X)AI: A Conceptual Clarification of User Trust and Survey of Its Empirical Evaluation.” <i>Cognitive Systems Research</i>, 101357, Elsevier BV, 2025, doi:<a href=\"https://doi.org/10.1016/j.cogsys.2025.101357\">10.1016/j.cogsys.2025.101357</a>.","apa":"Visser, R., Peters, T. M., Scharlau, I., &#38; Hammer, B. (2025). Trust, distrust, and appropriate reliance in (X)AI: A conceptual clarification of user trust and survey of its empirical evaluation. <i>Cognitive Systems Research</i>, Article 101357. <a href=\"https://doi.org/10.1016/j.cogsys.2025.101357\">https://doi.org/10.1016/j.cogsys.2025.101357</a>","ieee":"R. Visser, T. M. Peters, I. Scharlau, and B. Hammer, “Trust, distrust, and appropriate reliance in (X)AI: A conceptual clarification of user trust and survey of its empirical evaluation,” <i>Cognitive Systems Research</i>, Art. no. 101357, 2025, doi: <a href=\"https://doi.org/10.1016/j.cogsys.2025.101357\">10.1016/j.cogsys.2025.101357</a>.","chicago":"Visser, Roel, Tobias Martin Peters, Ingrid Scharlau, and Barbara Hammer. “Trust, Distrust, and Appropriate Reliance in (X)AI: A Conceptual Clarification of User Trust and Survey of Its Empirical Evaluation.” <i>Cognitive Systems Research</i>, 2025. <a href=\"https://doi.org/10.1016/j.cogsys.2025.101357\">https://doi.org/10.1016/j.cogsys.2025.101357</a>.","ama":"Visser R, Peters TM, Scharlau I, Hammer B. Trust, distrust, and appropriate reliance in (X)AI: A conceptual clarification of user trust and survey of its empirical evaluation. <i>Cognitive Systems Research</i>. Published online 2025. doi:<a href=\"https://doi.org/10.1016/j.cogsys.2025.101357\">10.1016/j.cogsys.2025.101357</a>"},"year":"2025","author":[{"first_name":"Roel","full_name":"Visser, Roel","last_name":"Visser"},{"full_name":"Peters, Tobias Martin","id":"92810","last_name":"Peters","orcid":"0009-0008-5193-6243","first_name":"Tobias Martin"},{"last_name":"Scharlau","orcid":"0000-0003-2364-9489","id":"451","full_name":"Scharlau, Ingrid","first_name":"Ingrid"},{"first_name":"Barbara","last_name":"Hammer","full_name":"Hammer, Barbara"}],"date_created":"2025-05-02T09:26:15Z","date_updated":"2025-05-15T11:16:27Z","publisher":"Elsevier BV","doi":"10.1016/j.cogsys.2025.101357","title":"Trust, distrust, and appropriate reliance in (X)AI: A conceptual clarification of user trust and survey of its empirical evaluation"},{"title":"An Empirical Investigation of Users' Assessment of XAI Explanations: Identifying the Sweet-Spot of Explanation Complexity","conference":{"location":"Valetta, Malta","end_date":"2024-07-19","start_date":"2024-07-17","name":"2nd World Conference on eXplainable Artificial Intelligence"},"date_updated":"2024-10-09T15:06:00Z","date_created":"2024-10-09T14:57:49Z","author":[{"last_name":"Liedeker","id":"93275","full_name":"Liedeker, Felix","first_name":"Felix"},{"full_name":"Düsing, Christoph","last_name":"Düsing","first_name":"Christoph"},{"first_name":"Marcel","last_name":"Nieveler","full_name":"Nieveler, Marcel"},{"first_name":"Philipp","last_name":"Cimiano","full_name":"Cimiano, Philipp"}],"year":"2024","citation":{"bibtex":"@inproceedings{Liedeker_Düsing_Nieveler_Cimiano_2024, title={An Empirical Investigation of Users’ Assessment of XAI Explanations: Identifying the Sweet-Spot of Explanation Complexity}, author={Liedeker, Felix and Düsing, Christoph and Nieveler, Marcel and Cimiano, Philipp}, year={2024} }","short":"F. Liedeker, C. Düsing, M. Nieveler, P. Cimiano, in: 2024.","mla":"Liedeker, Felix, et al. <i>An Empirical Investigation of Users’ Assessment of XAI Explanations: Identifying the Sweet-Spot of Explanation Complexity</i>. 2024.","apa":"Liedeker, F., Düsing, C., Nieveler, M., &#38; Cimiano, P. (2024). <i>An Empirical Investigation of Users’ Assessment of XAI Explanations: Identifying the Sweet-Spot of Explanation Complexity</i>. 2nd World Conference on eXplainable Artificial Intelligence, Valetta, Malta.","chicago":"Liedeker, Felix, Christoph Düsing, Marcel Nieveler, and Philipp Cimiano. “An Empirical Investigation of Users’ Assessment of XAI Explanations: Identifying the Sweet-Spot of Explanation Complexity,” 2024.","ieee":"F. Liedeker, C. Düsing, M. Nieveler, and P. Cimiano, “An Empirical Investigation of Users’ Assessment of XAI Explanations: Identifying the Sweet-Spot of Explanation Complexity,” presented at the 2nd World Conference on eXplainable Artificial Intelligence, Valetta, Malta, 2024.","ama":"Liedeker F, Düsing C, Nieveler M, Cimiano P. An Empirical Investigation of Users’ Assessment of XAI Explanations: Identifying the Sweet-Spot of Explanation Complexity. In: ; 2024."},"keyword":["XAI","Explanation Complexity","User Perception"],"language":[{"iso":"eng"}],"project":[{"name":"TRR 318 - C5: TRR 318 - Subproject C5","_id":"128"}],"_id":"56479","user_id":"93275","department":[{"_id":"660"}],"abstract":[{"lang":"eng","text":"While the importance of explainable artificial intelligence in high-stakes decision-making is widely recognized in existing literature, empirical studies assessing users' perceived value of explanations are scarce. In this paper, we aim to address this shortcoming by conducting an empirical study focused on measuring the perceived value of the following types of explanations: plain explanations based on feature attribution, counterfactual explanations and complex counterfactual explanations. We measure an explanation's value using five dimensions: perceived accuracy, understandability, plausibility, sufficiency of detail, and user satisfaction. Our findings indicate a sweet spot of explanation complexity, with both dimensional and structural complexity positively impacting the perceived value up to a certain threshold."}],"status":"public","type":"conference"},{"year":"2023","quality_controlled":"1","title":"Scaffolding the human partner by contrastive guidance in an explanatory human-robot dialogue","date_created":"2023-10-30T09:29:16Z","abstract":[{"lang":"eng","text":"Explanation has been identified as an important capability for AI-based systems, but research on systematic strategies for achieving understanding in interaction with such systems is still sparse. Negation is a linguistic strategy that is often used in explanations. It creates a contrast space between the affirmed and the negated item that enriches explaining processes with additional contextual information. While negation in human speech has been shown to lead to higher processing costs and worse task performance in terms of recall or action execution when used in isolation, it can decrease processing costs when used in context. So far, it has not been considered as a guiding strategy for explanations in human-robot interaction. We conducted an empirical study to investigate the use of negation as a guiding strategy in explanatory human-robot dialogue, in which a virtual robot explains tasks and possible actions to a human explainee to solve them in terms of gestures on a touchscreen. Our results show that negation vs. affirmation 1) increases processing costs measured as reaction time and 2) increases several aspects of task performance. While there was no significant effect of negation on the number of initially correctly executed gestures, we found a significantly lower number of attempts—measured as breaks in the finger movement data before the correct gesture was carried out—when being instructed through a negation. We further found that the gestures significantly resembled the presented prototype gesture more following an instruction with a negation as opposed to an affirmation. Also, the participants rated the benefit of contrastive vs. affirmative explanations significantly higher. Repeating the instructions decreased the effects of negation, yielding similar processing costs and task performance measures for negation and affirmation after several iterations. We discuss our results with respect to possible effects of negation on linguistic processing of explanations and limitations of our study."}],"publication":"Frontiers in Robotics and AI","keyword":["HRI","XAI","negation","understanding","explaining","touch interaction","gesture"],"language":[{"iso":"eng"}],"citation":{"mla":"Groß, A., et al. “Scaffolding the Human Partner by Contrastive Guidance in an Explanatory Human-Robot Dialogue.” <i>Frontiers in Robotics and AI</i>, vol. 10, 2023, doi:<a href=\"https://doi.org/10.3389/frobt.2023.1236184\">10.3389/frobt.2023.1236184</a>.","short":"A. Groß, A. Singh, N.C. Banh, B. Richter, I. Scharlau, K.J. Rohlfing, B. Wrede, Frontiers in Robotics and AI 10 (2023).","bibtex":"@article{Groß_Singh_Banh_Richter_Scharlau_Rohlfing_Wrede_2023, title={Scaffolding the human partner by contrastive guidance in an explanatory human-robot dialogue}, volume={10}, DOI={<a href=\"https://doi.org/10.3389/frobt.2023.1236184\">10.3389/frobt.2023.1236184</a>}, journal={Frontiers in Robotics and AI}, author={Groß, A. and Singh, Amit and Banh, Ngoc Chi and Richter, B. and Scharlau, Ingrid and Rohlfing, Katharina J. and Wrede, B.}, year={2023} }","apa":"Groß, A., Singh, A., Banh, N. C., Richter, B., Scharlau, I., Rohlfing, K. J., &#38; Wrede, B. (2023). Scaffolding the human partner by contrastive guidance in an explanatory human-robot dialogue. <i>Frontiers in Robotics and AI</i>, <i>10</i>. <a href=\"https://doi.org/10.3389/frobt.2023.1236184\">https://doi.org/10.3389/frobt.2023.1236184</a>","ama":"Groß A, Singh A, Banh NC, et al. Scaffolding the human partner by contrastive guidance in an explanatory human-robot dialogue. <i>Frontiers in Robotics and AI</i>. 2023;10. doi:<a href=\"https://doi.org/10.3389/frobt.2023.1236184\">10.3389/frobt.2023.1236184</a>","chicago":"Groß, A., Amit Singh, Ngoc Chi Banh, B. Richter, Ingrid Scharlau, Katharina J. Rohlfing, and B. Wrede. “Scaffolding the Human Partner by Contrastive Guidance in an Explanatory Human-Robot Dialogue.” <i>Frontiers in Robotics and AI</i> 10 (2023). <a href=\"https://doi.org/10.3389/frobt.2023.1236184\">https://doi.org/10.3389/frobt.2023.1236184</a>.","ieee":"A. Groß <i>et al.</i>, “Scaffolding the human partner by contrastive guidance in an explanatory human-robot dialogue,” <i>Frontiers in Robotics and AI</i>, vol. 10, 2023, doi: <a href=\"https://doi.org/10.3389/frobt.2023.1236184\">10.3389/frobt.2023.1236184</a>."},"intvolume":"        10","publication_status":"published","main_file_link":[{"url":"https://www.frontiersin.org/articles/10.3389/frobt.2023.1236184/full","open_access":"1"}],"doi":"10.3389/frobt.2023.1236184","date_updated":"2024-06-26T08:01:50Z","oa":"1","author":[{"first_name":"A.","full_name":"Groß, A.","last_name":"Groß"},{"orcid":"0000-0002-7789-1521","last_name":"Singh","full_name":"Singh, Amit","id":"91018","first_name":"Amit"},{"orcid":"0000-0002-5946-4542","last_name":"Banh","full_name":"Banh, Ngoc Chi","id":"38219","first_name":"Ngoc Chi"},{"last_name":"Richter","full_name":"Richter, B.","first_name":"B."},{"first_name":"Ingrid","orcid":"0000-0003-2364-9489","last_name":"Scharlau","full_name":"Scharlau, Ingrid","id":"451"},{"full_name":"Rohlfing, Katharina J.","id":"50352","last_name":"Rohlfing","first_name":"Katharina J."},{"first_name":"B.","last_name":"Wrede","full_name":"Wrede, B."}],"volume":10,"status":"public","type":"journal_article","article_type":"original","funded_apc":"1","project":[{"grant_number":"438445824","name":"TRR 318 - A05: TRR 318 - Echtzeitmessung der Aufmerksamkeit im Mensch-Roboter-Erklärdialog (Teilprojekt A05)","_id":"115"}],"_id":"48543","user_id":"38219","department":[{"_id":"749"}]},{"publication":"Journal of Decision Systems","type":"journal_article","status":"public","abstract":[{"lang":"eng","text":"Many applications are driven by Machine Learning (ML) today. While complex ML models lead to an accurate prediction, their inner decision-making is obfuscated. However, especially for high-stakes decisions, interpretability and explainability of the model are necessary. Therefore, we develop a holistic interpretability and explainability framework (HIEF) to objectively describe and evaluate an intelligent system’s explainable AI (XAI) capacities. This guides data scientists to create more transparent models. To evaluate our framework, we analyse 50 real estate appraisal papers to ensure the robustness of HIEF. Additionally, we identify six typical types of intelligent systems, so-called archetypes, which range from explanatory to predictive, and demonstrate how researchers can use the framework to identify blind-spot topics in their domain. Finally, regarding comprehensiveness, we used a random sample of six intelligent systems and conducted an applicability check to provide external validity."}],"department":[{"_id":"195"},{"_id":"196"}],"user_id":"77066","_id":"45299","language":[{"iso":"eng"}],"keyword":["Explainable AI (XAI)","machine learning","interpretability","real estate appraisal","framework","taxonomy"],"publication_identifier":{"issn":["1246-0125","2116-7052"]},"publication_status":"published","page":"1-41","citation":{"apa":"Kucklick, J.-P. (2023). HIEF: a holistic interpretability and explainability framework. <i>Journal of Decision Systems</i>, 1–41. <a href=\"https://doi.org/10.1080/12460125.2023.2207268\">https://doi.org/10.1080/12460125.2023.2207268</a>","short":"J.-P. Kucklick, Journal of Decision Systems (2023) 1–41.","bibtex":"@article{Kucklick_2023, title={HIEF: a holistic interpretability and explainability framework}, DOI={<a href=\"https://doi.org/10.1080/12460125.2023.2207268\">10.1080/12460125.2023.2207268</a>}, journal={Journal of Decision Systems}, publisher={Taylor &#38; Francis}, author={Kucklick, Jan-Peter}, year={2023}, pages={1–41} }","mla":"Kucklick, Jan-Peter. “HIEF: A Holistic Interpretability and Explainability Framework.” <i>Journal of Decision Systems</i>, Taylor &#38; Francis, 2023, pp. 1–41, doi:<a href=\"https://doi.org/10.1080/12460125.2023.2207268\">10.1080/12460125.2023.2207268</a>.","chicago":"Kucklick, Jan-Peter. “HIEF: A Holistic Interpretability and Explainability Framework.” <i>Journal of Decision Systems</i>, 2023, 1–41. <a href=\"https://doi.org/10.1080/12460125.2023.2207268\">https://doi.org/10.1080/12460125.2023.2207268</a>.","ieee":"J.-P. Kucklick, “HIEF: a holistic interpretability and explainability framework,” <i>Journal of Decision Systems</i>, pp. 1–41, 2023, doi: <a href=\"https://doi.org/10.1080/12460125.2023.2207268\">10.1080/12460125.2023.2207268</a>.","ama":"Kucklick J-P. HIEF: a holistic interpretability and explainability framework. <i>Journal of Decision Systems</i>. Published online 2023:1-41. doi:<a href=\"https://doi.org/10.1080/12460125.2023.2207268\">10.1080/12460125.2023.2207268</a>"},"year":"2023","author":[{"first_name":"Jan-Peter","full_name":"Kucklick, Jan-Peter","id":"77066","last_name":"Kucklick"}],"date_created":"2023-05-26T05:04:45Z","publisher":"Taylor & Francis","date_updated":"2023-05-26T05:08:36Z","doi":"10.1080/12460125.2023.2207268","main_file_link":[{"url":"https://www.tandfonline.com/doi/full/10.1080/12460125.2023.2207268"}],"title":"HIEF: a holistic interpretability and explainability framework"},{"year":"2022","citation":{"apa":"Kucklick, J.-P. (2022). Visual Interpretability of Image-based Real Estate Appraisal. <i>55th Annual Hawaii International Conference on System Sciences (HICSS-55)</i>. Hawaii International Conference on System Science (HICSS), Virtual.","short":"J.-P. Kucklick, in: 55th Annual Hawaii International Conference on System Sciences (HICSS-55), 2022.","bibtex":"@inproceedings{Kucklick_2022, title={Visual Interpretability of Image-based Real Estate Appraisal}, booktitle={55th Annual Hawaii International Conference on System Sciences (HICSS-55)}, author={Kucklick, Jan-Peter}, year={2022} }","mla":"Kucklick, Jan-Peter. “Visual Interpretability of Image-Based Real Estate Appraisal.” <i>55th Annual Hawaii International Conference on System Sciences (HICSS-55)</i>, 2022.","ama":"Kucklick J-P. Visual Interpretability of Image-based Real Estate Appraisal. In: <i>55th Annual Hawaii International Conference on System Sciences (HICSS-55)</i>. ; 2022.","ieee":"J.-P. Kucklick, “Visual Interpretability of Image-based Real Estate Appraisal,” presented at the Hawaii International Conference on System Science (HICSS), Virtual, 2022.","chicago":"Kucklick, Jan-Peter. “Visual Interpretability of Image-Based Real Estate Appraisal.” In <i>55th Annual Hawaii International Conference on System Sciences (HICSS-55)</i>, 2022."},"title":"Visual Interpretability of Image-based Real Estate Appraisal","main_file_link":[{"open_access":"1","url":"https://scholarspace.manoa.hawaii.edu/bitstream/10125/79519/0149.pdf"}],"conference":{"name":"Hawaii International Conference on System Science (HICSS)","start_date":"2022-01-03","end_date":"2022-01-07","location":"Virtual"},"date_updated":"2022-01-06T06:57:40Z","oa":"1","author":[{"first_name":"Jan-Peter","full_name":"Kucklick, Jan-Peter","id":"77066","last_name":"Kucklick"}],"date_created":"2021-11-17T07:08:15Z","abstract":[{"text":"Explainability for machine learning gets more and more important in high-stakes decisions like real estate appraisal. While traditional hedonic house pricing models are fed with hard information based on housing attributes, recently also soft information has been incorporated to increase the predictive performance. This soft information can be extracted from image data by complex models like Convolutional Neural Networks (CNNs). However, these are intransparent which excludes their use for high-stakes financial decisions. To overcome this limitation, we examine if a two-stage modeling approach can provide explainability. We combine visual interpretability by Regression Activation Maps (RAM) for the CNN and a linear regression for the overall prediction. Our experiments are based on 62.000 family homes in Philadelphia and the results indicate that the CNN learns aspects related to vegetation and quality aspects of the house from exterior images, improving the predictive accuracy of real estate appraisal by up to 5.4%.","lang":"eng"}],"status":"public","type":"conference","publication":"55th Annual Hawaii International Conference on System Sciences (HICSS-55)","keyword":["Explainable Artificial Intelligence (XAI)","Regression Activation Maps","Real Estate Appraisal","Convolutional Block Attention Module","Computer Vision"],"language":[{"iso":"eng"}],"_id":"27506","user_id":"77066","department":[{"_id":"195"},{"_id":"196"}]},{"keyword":["Explainable Artificial Intelligence","XAI","Interpretability","Decision Support Systems","Taxonomy"],"language":[{"iso":"eng"}],"_id":"29539","department":[{"_id":"195"},{"_id":"196"}],"user_id":"77066","abstract":[{"text":"Explainable Artificial Intelligence (XAI) is currently an important topic for the application of Machine Learning (ML) in high-stakes decision scenarios. Related research focuses on evaluating ML algorithms in terms of interpretability. However, providing a human understandable explanation of an intelligent system does not only relate to the used ML algorithm. The data and features used also have a considerable impact on interpretability. In this paper, we develop a taxonomy for describing XAI systems based on aspects about the algorithm and data. The proposed taxonomy gives researchers and practitioners opportunities to describe and evaluate current XAI systems with respect to interpretability and guides the future development of this class of systems.","lang":"eng"}],"status":"public","publication":"Wirtschaftsinformatik 2022 Proceedings","type":"conference","title":"Towards a model- and data-focused taxonomy of XAI systems","conference":{"location":"Nürnberg (online)","end_date":"2022-02-23","start_date":"2022-02-21","name":"Wirtschaftsinformatik 2022 (WI22)"},"main_file_link":[{"url":"https://aisel.aisnet.org/cgi/viewcontent.cgi?article=1056&context=wi2022","open_access":"1"}],"date_updated":"2022-01-26T08:24:30Z","oa":"1","author":[{"first_name":"Jan-Peter","full_name":"Kucklick, Jan-Peter","id":"77066","last_name":"Kucklick"}],"date_created":"2022-01-26T08:22:03Z","year":"2022","citation":{"ieee":"J.-P. Kucklick, “Towards a model- and data-focused taxonomy of XAI systems,” presented at the Wirtschaftsinformatik 2022 (WI22), Nürnberg (online), 2022.","chicago":"Kucklick, Jan-Peter. “Towards a Model- and Data-Focused Taxonomy of XAI Systems.” In <i>Wirtschaftsinformatik 2022 Proceedings</i>, 2022.","ama":"Kucklick J-P. Towards a model- and data-focused taxonomy of XAI systems. In: <i>Wirtschaftsinformatik 2022 Proceedings</i>. ; 2022.","short":"J.-P. Kucklick, in: Wirtschaftsinformatik 2022 Proceedings, 2022.","bibtex":"@inproceedings{Kucklick_2022, title={Towards a model- and data-focused taxonomy of XAI systems}, booktitle={Wirtschaftsinformatik 2022 Proceedings}, author={Kucklick, Jan-Peter}, year={2022} }","mla":"Kucklick, Jan-Peter. “Towards a Model- and Data-Focused Taxonomy of XAI Systems.” <i>Wirtschaftsinformatik 2022 Proceedings</i>, 2022.","apa":"Kucklick, J.-P. (2022). Towards a model- and data-focused taxonomy of XAI systems. <i>Wirtschaftsinformatik 2022 Proceedings</i>. Wirtschaftsinformatik 2022 (WI22), Nürnberg (online)."}}]
