[{"date_updated":"2024-02-26T08:46:26Z","author":[{"last_name":"Esposito","full_name":"Esposito, Elena","first_name":"Elena"}],"date_created":"2024-02-18T10:16:43Z","volume":16,"title":"Does Explainability Require Transparency?","doi":"10.6092/ISSN.1971-8853/15804","issue":"3","year":"2023","citation":{"ieee":"E. Esposito, “Does Explainability Require Transparency?,” <i>Sociologica</i>, vol. 16, no. 3, pp. 17–27, 2023, doi: <a href=\"https://doi.org/10.6092/ISSN.1971-8853/15804\">10.6092/ISSN.1971-8853/15804</a>.","chicago":"Esposito, Elena. “Does Explainability Require Transparency?” <i>Sociologica</i> 16, no. 3 (2023): 17–27. <a href=\"https://doi.org/10.6092/ISSN.1971-8853/15804\">https://doi.org/10.6092/ISSN.1971-8853/15804</a>.","ama":"Esposito E. Does Explainability Require Transparency? <i>Sociologica</i>. 2023;16(3):17-27. doi:<a href=\"https://doi.org/10.6092/ISSN.1971-8853/15804\">10.6092/ISSN.1971-8853/15804</a>","short":"E. Esposito, Sociologica 16 (2023) 17–27.","mla":"Esposito, Elena. “Does Explainability Require Transparency?” <i>Sociologica</i>, vol. 16, no. 3, 2023, pp. 17–27, doi:<a href=\"https://doi.org/10.6092/ISSN.1971-8853/15804\">10.6092/ISSN.1971-8853/15804</a>.","bibtex":"@article{Esposito_2023, title={Does Explainability Require Transparency?}, volume={16}, DOI={<a href=\"https://doi.org/10.6092/ISSN.1971-8853/15804\">10.6092/ISSN.1971-8853/15804</a>}, number={3}, journal={Sociologica}, author={Esposito, Elena}, year={2023}, pages={17–27} }","apa":"Esposito, E. (2023). Does Explainability Require Transparency? <i>Sociologica</i>, <i>16</i>(3), 17–27. <a href=\"https://doi.org/10.6092/ISSN.1971-8853/15804\">https://doi.org/10.6092/ISSN.1971-8853/15804</a>"},"intvolume":"        16","page":"17-27","project":[{"_id":"121","name":"TRR 318 - B01: TRR 318 - Ein dialogbasierter Ansatz zur Erklärung von Modellen des maschinellen Lernens (Teilprojekt B01)","grant_number":"438445824"}],"_id":"51368","user_id":"54779","department":[{"_id":"660"}],"keyword":["Explainable AI","Transparency","Explanation","Communication","Sociological systems theory"],"language":[{"iso":"eng"}],"type":"journal_article","publication":"Sociologica","abstract":[{"text":"Dealing with opaque algorithms, the frequent overlap between transparency and explainability produces seemingly unsolvable dilemmas, as the much-discussed trade-off between model performance and model transparency. Referring to Niklas Luhmann's notion of communication, the paper argues that explainability does not necessarily require transparency and proposes an alternative approach. Explanations as communicative processes do not imply any disclosure of thoughts or neural processes, but only reformulations that provide the partners with additional elements and enable them to understand (from their perspective) what has been done and why. Recent computational approaches aiming at post-hoc explainability reproduce what happens in communication, producing explanations of the working of algorithms that can be different from the processes of the algorithms.","lang":"eng"}],"status":"public"},{"type":"journal_article","publication":"Sociologica","abstract":[{"lang":"eng","text":"This short introduction presents the symposium ‘Explaining Machines’. It locates the debate about Explainable AI in the history of the reflection about AI and outlines the issues discussed in the contributions."}],"status":"public","project":[{"name":"TRR 318 - B01: TRR 318 - Ein dialogbasierter Ansatz zur Erklärung von Modellen des maschinellen Lernens (Teilprojekt B01)","_id":"121","grant_number":"438445824"}],"_id":"51369","user_id":"54779","department":[{"_id":"660"}],"keyword":["Explainable AI","Inexplicability","Transparency","Explanation","Opacity","Contestability"],"language":[{"iso":"eng"}],"issue":"3","year":"2023","citation":{"ama":"Esposito E. Explaining Machines: Social Management of Incomprehensible Algorithms. Introduction. <i>Sociologica</i>. 2023;16(3):1-4. doi:<a href=\"https://doi.org/10.6092/ISSN.1971-8853/16265\">10.6092/ISSN.1971-8853/16265</a>","ieee":"E. Esposito, “Explaining Machines: Social Management of Incomprehensible Algorithms. Introduction,” <i>Sociologica</i>, vol. 16, no. 3, pp. 1–4, 2023, doi: <a href=\"https://doi.org/10.6092/ISSN.1971-8853/16265\">10.6092/ISSN.1971-8853/16265</a>.","chicago":"Esposito, Elena. “Explaining Machines: Social Management of Incomprehensible Algorithms. Introduction.” <i>Sociologica</i> 16, no. 3 (2023): 1–4. <a href=\"https://doi.org/10.6092/ISSN.1971-8853/16265\">https://doi.org/10.6092/ISSN.1971-8853/16265</a>.","mla":"Esposito, Elena. “Explaining Machines: Social Management of Incomprehensible Algorithms. Introduction.” <i>Sociologica</i>, vol. 16, no. 3, 2023, pp. 1–4, doi:<a href=\"https://doi.org/10.6092/ISSN.1971-8853/16265\">10.6092/ISSN.1971-8853/16265</a>.","short":"E. Esposito, Sociologica 16 (2023) 1–4.","bibtex":"@article{Esposito_2023, title={Explaining Machines: Social Management of Incomprehensible Algorithms. Introduction}, volume={16}, DOI={<a href=\"https://doi.org/10.6092/ISSN.1971-8853/16265\">10.6092/ISSN.1971-8853/16265</a>}, number={3}, journal={Sociologica}, author={Esposito, Elena}, year={2023}, pages={1–4} }","apa":"Esposito, E. (2023). Explaining Machines: Social Management of Incomprehensible Algorithms. Introduction. <i>Sociologica</i>, <i>16</i>(3), 1–4. <a href=\"https://doi.org/10.6092/ISSN.1971-8853/16265\">https://doi.org/10.6092/ISSN.1971-8853/16265</a>"},"page":"1-4","intvolume":"        16","date_updated":"2024-02-26T08:45:56Z","date_created":"2024-02-18T10:23:23Z","author":[{"first_name":"Elena","full_name":"Esposito, Elena","last_name":"Esposito"}],"volume":16,"title":"Explaining Machines: Social Management of Incomprehensible Algorithms. Introduction","doi":"10.6092/ISSN.1971-8853/16265"},{"abstract":[{"lang":"eng","text":"Many applications are driven by Machine Learning (ML) today. While complex ML models lead to an accurate prediction, their inner decision-making is obfuscated. However, especially for high-stakes decisions, interpretability and explainability of the model are necessary. Therefore, we develop a holistic interpretability and explainability framework (HIEF) to objectively describe and evaluate an intelligent system’s explainable AI (XAI) capacities. This guides data scientists to create more transparent models. To evaluate our framework, we analyse 50 real estate appraisal papers to ensure the robustness of HIEF. Additionally, we identify six typical types of intelligent systems, so-called archetypes, which range from explanatory to predictive, and demonstrate how researchers can use the framework to identify blind-spot topics in their domain. Finally, regarding comprehensiveness, we used a random sample of six intelligent systems and conducted an applicability check to provide external validity."}],"status":"public","publication":"Journal of Decision Systems","type":"journal_article","keyword":["Explainable AI (XAI)","machine learning","interpretability","real estate appraisal","framework","taxonomy"],"language":[{"iso":"eng"}],"_id":"45299","department":[{"_id":"195"},{"_id":"196"}],"user_id":"77066","year":"2023","page":"1-41","citation":{"mla":"Kucklick, Jan-Peter. “HIEF: A Holistic Interpretability and Explainability Framework.” <i>Journal of Decision Systems</i>, Taylor &#38; Francis, 2023, pp. 1–41, doi:<a href=\"https://doi.org/10.1080/12460125.2023.2207268\">10.1080/12460125.2023.2207268</a>.","short":"J.-P. Kucklick, Journal of Decision Systems (2023) 1–41.","bibtex":"@article{Kucklick_2023, title={HIEF: a holistic interpretability and explainability framework}, DOI={<a href=\"https://doi.org/10.1080/12460125.2023.2207268\">10.1080/12460125.2023.2207268</a>}, journal={Journal of Decision Systems}, publisher={Taylor &#38; Francis}, author={Kucklick, Jan-Peter}, year={2023}, pages={1–41} }","apa":"Kucklick, J.-P. (2023). HIEF: a holistic interpretability and explainability framework. <i>Journal of Decision Systems</i>, 1–41. <a href=\"https://doi.org/10.1080/12460125.2023.2207268\">https://doi.org/10.1080/12460125.2023.2207268</a>","chicago":"Kucklick, Jan-Peter. “HIEF: A Holistic Interpretability and Explainability Framework.” <i>Journal of Decision Systems</i>, 2023, 1–41. <a href=\"https://doi.org/10.1080/12460125.2023.2207268\">https://doi.org/10.1080/12460125.2023.2207268</a>.","ieee":"J.-P. Kucklick, “HIEF: a holistic interpretability and explainability framework,” <i>Journal of Decision Systems</i>, pp. 1–41, 2023, doi: <a href=\"https://doi.org/10.1080/12460125.2023.2207268\">10.1080/12460125.2023.2207268</a>.","ama":"Kucklick J-P. HIEF: a holistic interpretability and explainability framework. <i>Journal of Decision Systems</i>. Published online 2023:1-41. doi:<a href=\"https://doi.org/10.1080/12460125.2023.2207268\">10.1080/12460125.2023.2207268</a>"},"publication_identifier":{"issn":["1246-0125","2116-7052"]},"publication_status":"published","title":"HIEF: a holistic interpretability and explainability framework","doi":"10.1080/12460125.2023.2207268","main_file_link":[{"url":"https://www.tandfonline.com/doi/full/10.1080/12460125.2023.2207268"}],"date_updated":"2023-05-26T05:08:36Z","publisher":"Taylor & Francis","date_created":"2023-05-26T05:04:45Z","author":[{"full_name":"Kucklick, Jan-Peter","id":"77066","last_name":"Kucklick","first_name":"Jan-Peter"}]},{"language":[{"iso":"eng"}],"keyword":["Explainable AI","Clinical decision support","Bayesian network","Counterfactual explanations"],"user_id":"93275","department":[{"_id":"660"}],"project":[{"name":"TRR 318 - C5: TRR 318 - Subproject C5","_id":"128"}],"_id":"56477","status":"public","abstract":[{"lang":"eng","text":"We describe a prototype of a Clinical Decision Support System (CDSS) that provides (counterfactual) explanations to support accurate medical diagnosis. The prototype is based on an inherently interpretable Bayesian network (BN). Our research aims to investigate which explanations are most useful for medical experts and whether co-constructing explanations can foster trust and acceptance of CDSS."}],"type":"conference","conference":{"end_date":"2023-07-28","location":"Lissabon","name":"xAI-2023 Late-breaking Work, Demos and Doctoral Consortium co-located with the 1st World Conference on eXplainable Artificial Intelligence (xAI-2023)","start_date":"2023-07-26"},"title":"A Prototype of an Interactive Clinical Decision Support System with Counterfactual Explanations","author":[{"full_name":"Liedeker, Felix","id":"93275","last_name":"Liedeker","first_name":"Felix"},{"first_name":"Philipp","last_name":"Cimiano","full_name":"Cimiano, Philipp"}],"date_created":"2024-10-09T14:50:09Z","date_updated":"2024-10-09T15:04:53Z","citation":{"mla":"Liedeker, Felix, and Philipp Cimiano. <i>A Prototype of an Interactive Clinical Decision Support System with Counterfactual Explanations</i>. 2023.","bibtex":"@inproceedings{Liedeker_Cimiano_2023, title={A Prototype of an Interactive Clinical Decision Support System with Counterfactual Explanations}, author={Liedeker, Felix and Cimiano, Philipp}, year={2023} }","short":"F. Liedeker, P. Cimiano, in: 2023.","apa":"Liedeker, F., &#38; Cimiano, P. (2023). <i>A Prototype of an Interactive Clinical Decision Support System with Counterfactual Explanations</i>. xAI-2023 Late-breaking Work, Demos and Doctoral Consortium co-located with the 1st World Conference on eXplainable Artificial Intelligence (xAI-2023), Lissabon.","ama":"Liedeker F, Cimiano P. A Prototype of an Interactive Clinical Decision Support System with Counterfactual Explanations. In: ; 2023.","chicago":"Liedeker, Felix, and Philipp Cimiano. “A Prototype of an Interactive Clinical Decision Support System with Counterfactual Explanations,” 2023.","ieee":"F. Liedeker and P. Cimiano, “A Prototype of an Interactive Clinical Decision Support System with Counterfactual Explanations,” presented at the xAI-2023 Late-breaking Work, Demos and Doctoral Consortium co-located with the 1st World Conference on eXplainable Artificial Intelligence (xAI-2023), Lissabon, 2023."},"year":"2023"}]
