@inbook{65061,
  abstract     = {{One of the purposes for which XAI is often brought into play is to enable a user to act responsibly. However, responsibility is a complex normative and social phenomenon that we unfold in this chapter. We contend that the classical concepts of agency and responsibility do not fully capture what is needed for meaningful collaboration between human users and XAI. Advocating the perspective of sXAI, we argue that the growing adaptivity of AI systems will result in sXAI being considered as partners. Both partners adopt particular (dialogical) roles within a collaborative process and take responsibility for them. We expect these roles to elicit reactive attitudes toward the sXAI on the part of the human partners, which makes the roles relational. These reactive attitudes resemble those that we hold toward other human agents. For agents to exercise their responsibility, they need to possess agential capacities to fulfill their role with respect to the structure of a social interaction. Hence, sXAI can be expected to act responsibly. But because of XAI’s limited normative capacities, it might rather act as a marginal agent. We refer to marginal agents and show how they can be scaffolded with regard to their agential capacities and their knowledge about the structure of a social interaction. The structure links the actions of the partners to each other in terms of a set of stimuli and responses to them in pursuit of a particular goal. Hence, it is important to differentiate between the different goals that a structure can impose for exercising responsibility. We therefore follow Shoemaker (Responsibility from the margins. Oxford University Press; 2015. https://doi.org/10.1093/acprof:oso/9780198715672.24001.0001) and offer three structures that can help to organize responsibility for \emph{decisions made} with the assistance of AI systems: attributability, answerability, and accountability. Our insights will inform the development and design process of XAI to meet the guiding principles of responsible research and innovation as well as trustworthy AI.}},
  author       = {{Rohlfing, Katharina J. and Alpsancar, Suzana and Schulte, Carsten}},
  booktitle    = {{Social Explainable AI}},
  isbn         = {{9789819652891}},
  pages        = {{157--177}},
  publisher    = {{Springer Nature Singapore}},
  title        = {{{Responsibilities in sXAI}}},
  doi          = {{10.1007/978-981-96-5290-7_9}},
  year         = {{2026}},
}

@inbook{65063,
  abstract     = {{This chapter critically examines how social explainable AI (sXAI) can better support AI practitioners in ensuring fairness in AI-based decision-making. We argue for a fundamental shift: Fairness should be understood not as a technical property or an information problem, but as a matter of vulnerability—focusing on the real-world impacts of AI on individuals and groups, especially those most at risk. We thus call for a shift in perspective: from fair AI to \emph{tasking AI fairly}. To motivate our vulnerability approach, we review the “Dutch welfare fraud scandal” (system risk indication—SyRI) and current challenges in the field of fair AI/machine learning (ML). The vulnerability of a person or of members of a definable group of persons is a complex relational notion, not a technical property of a technical system. Accordingly, we suggest several nontechnical strategies that hold the promise to compensate for the insufficiency of purely technical approaches to fairness and other ethical issues in the practical use of AI-based systems. To discuss how sXAI, due to its interactive and adaptive social character, might fulfill this role better than current XAI techniques, we provide a toy scenario of how sXAI might support the virtuous AI practitioner in an ethical inquiry. Finally, we also address challenges and limits of our approach.}},
  author       = {{Alpsancar, Suzana and Stamboliev, Eugenia}},
  booktitle    = {{Social Explainable AI}},
  isbn         = {{9789819652891}},
  pages        = {{557--581}},
  publisher    = {{Springer Nature Singapore}},
  title        = {{{Tasking AI Fairly. How to Empower AI Practitioners With sXAI?}}},
  doi          = {{10.1007/978-981-96-5290-7_29}},
  year         = {{2026}},
}

@inbook{65064,
  abstract     = {{XAI can minimize the risks of being manipulated and deceived by AI, but in turn entails other specific risks. This also applies to sXAI, and the specifically social character of sXAI harbors particular risks that designers and developers should be aware of. In this chapter, we discuss the potential opportunities and risks of sXAI. We see a particularly positive potential in the social character of sXAI: skillful users, including those with “healthy distrust,” can use the adaptivity of sXAI to produce an explanation that is actually relevant and adequate for them. However, this requires a high level of skill on the part of the user and thus stands in contrast to the general promise of efficiency in the use of AI. A potential risk of sXAI is that it can be (even more) persuasive, as the interactive involvement and the anthropomorphism strengthen a trustworthy appearance/performance (independent of the adequacy of the sXAI performance).}},
  author       = {{Alpsancar, Suzana and Klenk, Michael}},
  booktitle    = {{Social Explainable AI}},
  isbn         = {{9789819652891}},
  pages        = {{583--616}},
  publisher    = {{Springer Nature Singapore}},
  title        = {{{The Risk of Manipulation and Deception in sXAI}}},
  doi          = {{10.1007/978-981-96-5290-7_30}},
  year         = {{2026}},
}

@inproceedings{60820,
  author       = {{Philippi, Martina}},
  location     = {{BTU Cottbus-Senftenberg, Cottbus}},
  title        = {{{Die Ambivalenz von Sichtbarkeit. Ethische Perspektiven auf die digitale Transformation}}},
  year         = {{2025}},
}

@inproceedings{60821,
  author       = {{Philippi, Martina}},
  location     = {{Berlin}},
  title        = {{{Grenzen des Verstehens}}},
  year         = {{2025}},
}

@inbook{60234,
  abstract     = {{It has become a new global trend that governments are partially automating decision-making processes by public agencies. This, however, has led to some scandals revealing grave injustices, including the Robodebt scandal in Australia and the childcare benefit scandal in the Netherlands. This chapter argues that the normative impacts of the move towards automated decision-making can be fruitfully understood and addressed through the lens of civic virtue. It starts by outlining the Dutch childcare benefit scandal, showing that what happened cannot be reduced solely to human intent or structural factors, but must also be understood through the in-between term of human moral dispositions. Following this insight, the chapter develops a framework of civic virtue, which outlines ideal states (civic virtue) and their deviations (civic vice) for different temporal configurations (past-, present-, and future-oriented civic virtue). Finally, the chapter uses this framework to reflect on the much-touted principle of ‘explainability’ in addressing harms like the ones done to citizens in the childcare benefit scandal. Three impacts are laid bare through the lens of civic virtue: servility, presumptuousness, and political recalcitrance. Explainability, the chapter argues, may successfully address these impacts, but only if it ceases to focus solely on narrow black box problems in AI and addresses public governance more holistically.}},
  author       = {{Reijers, Wessel}},
  booktitle    = {{Public Governance and Emerging Technologies}},
  isbn         = {{9783031847479}},
  publisher    = {{Springer Nature Switzerland}},
  title        = {{{Civic Vice in Digital Governance}}},
  doi          = {{10.1007/978-3-031-84748-6_14}},
  year         = {{2025}},
}

@book{60235,
  author       = {{Reijers, Wessel and Thomas Young, Mark and Coeckelbergh, Mark}},
  title        = {{{Introduction to the Ethics of Emerging Technologies}}},
  year         = {{2025}},
}

@article{60233,
  abstract     = {{Emerging technologies pose many new challenges for regulation and governance on a global scale. With the advent of distributed communication networks like the Internet and decentralized ledger technologies like blockchain, new platforms emerged, disrupting existing power dynamics and bringing about new claims of sovereignty from the private sector. This special issue addresses a gap in the literature by focusing the discourse on the issue of \emph{trust} and \emph{confidence} in the digital realm. In particular, looking at the evolution of the web (from Web 1.0, to Web 2.0, and then Web 3), this article analyses how every iteration reflects a different way of dealing with the problem of \emph{trust} online, resulting in a different regulation and governance landscape. Technology is often regarded as a new lever of regulation, attempting to resolve the problem of “trust” online, either through the introduction of a new trusted authority (Web 2.0) or through the introduction of technological guarantees that provide more assurance—or “confidence”—in the way interactions can be operationalized (Web 3). Yet, each of these technologies also introduces new risks and governance costs, ultimately shifting the problem of trust in a new direction rather than resolving it or removing the need for trust altogether. The main contribution of the articles in this special issue is to provide a better understanding of the trust challenges faced and posed by emerging technologies and to demonstrate how they affect institutional governance—in both theory and practice—with a view to helping policymakers find appropriate answers to these challenges.}},
  author       = {{de Filippi, Primavera and Mannan, Morshed and Reijers, Wessel}},
  issn         = {{1748-5983}},
  journal      = {{Regulation \& Governance}},
  publisher    = {{Wiley}},
  title        = {{{How to Govern the Confidence Machine?}}},
  doi          = {{10.1111/rego.70017}},
  year         = {{2025}},
}

@unpublished{59917,
  abstract     = {{Under the slogan of trustworthy AI, much of contemporary AI research is focused on designing AI systems and usage practices that inspire human trust and, thus, enhance the adoption of AI systems. However, a person affected by an AI system may not be convinced by AI system design alone---nor should they be, if the AI system is embedded in a social context that gives good reason to believe that it is used in tension with a person’s interests. In such cases, distrust in the system may be justified and necessary to build meaningful trust in the first place. We propose the term \emph{healthy distrust} to describe such a justified, careful stance towards certain AI usage practices. We investigate prior notions of trust and distrust in computer science, sociology, history, psychology, and philosophy, outline a remaining gap that healthy distrust might fill, and conceptualize healthy distrust as a crucial part of AI usage that respects human autonomy.}},
  author       = {{Paaßen, Benjamin and Alpsancar, Suzana and Matzner, Tobias and Scharlau, Ingrid}},
  booktitle    = {{arXiv}},
  title        = {{{Healthy Distrust in AI systems}}},
  year         = {{2025}},
}

@article{57531,
  author       = {{Alpsancar, Suzana and Buhl, Heike M. and Matzner, Tobias and Scharlau, Ingrid}},
  journal      = {{AI and Ethics}},
  pages        = {{3015--3033}},
  publisher    = {{Springer}},
  title        = {{{Explanation needs and ethical demands: unpacking the instrumental value of XAI}}},
  doi          = {{10.1007/s43681-024-00622-3}},
  volume       = {{5}},
  year         = {{2025}},
}

@inbook{62305,
  author       = {{Reijers, Wessel and Matzner, Tobias and Alpsancar, Suzana}},
  booktitle    = {{Digital Development. Technology, Ethics and Governance}},
  editor       = {{Farina, Mirko and Yu, Xiao and Chen, Jin}},
  isbn         = {{9781003567622}},
  publisher    = {{Routledge}},
  title        = {{{Explainability and AI Governance}}},
  doi          = {{10.4324/9781003567622-22}},
  year         = {{2025}},
}

@inbook{55869,
  author       = {{Alpsancar, Suzana}},
  booktitle    = {{Philosophische Digitalisierungsforschung. Verantwortung, Verständigung, Vernunft, Macht}},
  editor       = {{Adolphi, Rainer and Alpsancar, Suzana and Hahn, Susanne and Kettner, Matthias}},
  pages        = {{55--113}},
  publisher    = {{transcript}},
  title        = {{{Warum und wozu erklärbare KI? Über die Verschiedenheit dreier paradigmatischer Zwecksetzungen}}},
  year         = {{2024}},
}

@inproceedings{57172,
  author       = {{Reijers, Wessel and Matzner, Tobias and Alpsancar, Suzana and Philippi, Martina}},
  booktitle    = {{Smart Ethics in the Digital World: Proceedings of the ETHICOMP 2024. 21st International Conference on the Ethical and Social Impacts of ICT}},
  publisher    = {{Universidad de La Rioja}},
  title        = {{{AI explainability, temporality, and civic virtue}}},
  year         = {{2024}},
}

@inproceedings{56217,
  author       = {{Alpsancar, Suzana and Matzner, Tobias and Philippi, Martina}},
  booktitle    = {{Smart Ethics in the Digital World: Proceedings of the ETHICOMP 2024. 21st International Conference on the Ethical and Social Impacts of ICT}},
  pages        = {{31--35}},
  publisher    = {{Universidad de La Rioja}},
  title        = {{{Unpacking the purposes of explainable AI}}},
  year         = {{2024}},
}

@inproceedings{60826,
  author       = {{Philippi, Martina}},
  booktitle    = {{TA24}},
  location     = {{ÖAW Wien}},
  title        = {{{Von der Kunst, die richtigen Fragen zu stellen. Das Potential der Phänomenologie für die Technikfolgenabschätzung}}},
  year         = {{2024}},
}

@inproceedings{60825,
  author       = {{Philippi, Martina and Mindlin, Dimitry}},
  booktitle    = {{EASST-4S}},
  location     = {{VU Amsterdam}},
  title        = {{{Dealing responsibly with tacit assumptions. An interdisciplinary approach to the integration of ethical reflexion into user modeling}}},
  year         = {{2024}},
}

@inproceedings{60824,
  author       = {{Philippi, Martina}},
  booktitle    = {{fpet2024}},
  location     = {{ZKM Karlsruhe}},
  title        = {{{How to address ethical problems in a multi-perspective context: Interdisciplinary challenges of XAI}}},
  year         = {{2024}},
}

@inproceedings{60829,
  author       = {{Philippi, Martina and Reijers, Wessel}},
  location     = {{Paderborn}},
  title        = {{{Ethics of Explainable AI}}},
  year         = {{2024}},
}

@inproceedings{60827,
  author       = {{Philippi, Martina}},
  location     = {{Paderborn}},
  title        = {{{Dual-use potential in humanitarian UAVs}}},
  year         = {{2024}},
}

@misc{60831,
  author       = {{Philippi, Martina}},
  title        = {{{Interdisciplinary challenges for XAI ethics and the potential of the phenomenological approach. Guest lecture at the Eindhoven Center for the Philosophy of AI (ECPAI), TU Eindhoven, 25 June 2024.}}},
  year         = {{2024}},
}

