@article{51368,
  abstract     = {Dealing with opaque algorithms, the frequent overlap between transparency and explainability produces seemingly unsolvable dilemmas, as the much-discussed trade-off between model performance and model transparency. Referring to Niklas Luhmann's notion of communication, the paper argues that explainability does not necessarily require transparency and proposes an alternative approach. Explanations as communicative processes do not imply any disclosure of thoughts or neural processes, but only reformulations that provide the partners with additional elements and enable them to understand (from their perspective) what has been done and why. Recent computational approaches aiming at post-hoc explainability reproduce what happens in communication, producing explanations of the working of algorithms that can be different from the processes of the algorithms.},
  author       = {Esposito, Elena},
  journal      = {Sociologica},
  keywords     = {Explainable AI, Transparency, Explanation, Communication, Sociological systems theory},
  number       = {3},
  pages        = {17--27},
  title        = {Does Explainability Require Transparency?},
  doi          = {10.6092/ISSN.1971-8853/15804},
  volume       = {16},
  year         = {2023},
}

@article{51369,
  abstract     = {This short introduction presents the symposium `Explaining Machines'. It locates the debate about Explainable AI in the history of the reflection about AI and outlines the issues discussed in the contributions.},
  author       = {Esposito, Elena},
  journal      = {Sociologica},
  keywords     = {Explainable AI, Inexplicability, Transparency, Explanation, Opacity, Contestability},
  number       = {3},
  pages        = {1--4},
  title        = {Explaining Machines: Social Management of Incomprehensible Algorithms. Introduction},
  doi          = {10.6092/ISSN.1971-8853/16265},
  volume       = {16},
  year         = {2023},
}

@article{45299,
  abstract     = {Many applications are driven by Machine Learning (ML) today. While complex ML models lead to an accurate prediction, their inner decision-making is obfuscated. However, especially for high-stakes decisions, interpretability and explainability of the model are necessary. Therefore, we develop a holistic interpretability and explainability framework (HIEF) to objectively describe and evaluate an intelligent system's explainable AI (XAI) capacities. This guides data scientists to create more transparent models. To evaluate our framework, we analyse 50 real estate appraisal papers to ensure the robustness of HIEF. Additionally, we identify six typical types of intelligent systems, so-called archetypes, which range from explanatory to predictive, and demonstrate how researchers can use the framework to identify blind-spot topics in their domain. Finally, regarding comprehensiveness, we used a random sample of six intelligent systems and conducted an applicability check to provide external validity.},
  author       = {Kucklick, Jan-Peter},
  issn         = {1246-0125},
  journal      = {Journal of Decision Systems},
  keywords     = {Explainable AI (XAI), machine learning, interpretability, real estate appraisal, framework, taxonomy},
  pages        = {1--41},
  publisher    = {Taylor \& Francis},
  title        = {{HIEF}: A Holistic Interpretability and Explainability Framework},
  doi          = {10.1080/12460125.2023.2207268},
  year         = {2023},
}

@inproceedings{56477,
  abstract     = {We describe a prototype of a Clinical Decision Support System (CDSS) that provides (counterfactual) explanations to support accurate medical diagnosis. The prototype is based on an inherently interpretable Bayesian network (BN). Our research aims to investigate which explanations are most useful for medical experts and whether co-constructing explanations can foster trust and acceptance of CDSS.},
  author       = {Liedeker, Felix and Cimiano, Philipp},
  keywords     = {Explainable AI, Clinical decision support, Bayesian network, Counterfactual explanations},
  location     = {Lissabon},
  title        = {A Prototype of an Interactive Clinical Decision Support System with Counterfactual Explanations},
  year         = {2023},
  internal-note = {review: booktitle is missing but required for @inproceedings -- TODO locate the proceedings title; location appears to be the conference venue (Lisbon), which biblatex would put in venue},
}

