@inbook{65061,
  abstract     = {{<jats:title>Abstract</jats:title>
                  <jats:p>
                    One of the purposes for which XAI is often brought into play is to enable a user to act responsibly. However, responsibility is a complex normative and social phenomenon that we unfold in this chapter. We consider that the classical concepts of agency and responsibility do not fully capture what is needed for meaningful collaboration between human users and XAI. Advocating the perspective of sXAI, we argue that the growing adaptivity of AI systems will result in sXAI being considered as partners. Both partners adopt particular (dialogical) roles within a collaborative process and take responsibility for them. We expect that these roles lead to reactive attitudes toward the sXAI on the side of the human partners that make these roles relational. They resemble those reactive attitudes that we hold toward other human agents. For agents to exercise their responsibility, they need to possess agential capacities to fulfill their role with respect to the structure of a social interaction. Hence, sXAI can be expected to act responsibly. But because of XAI’s limited normative capacities, it might rather act as a marginal agent. We refer to marginal agents and show they can be scaffolded with regard to their agential capacities and their knowledge about the structure of a social interaction. The structure links the actions of the partners to each other in terms of a set of stimuli and responses to it in pursuit of a particular goal. Hence, it is important to differentiate between the different goals that a structure can impose for exercising responsibility. Therefore, we follow (Responsibility from the margins. Oxford University Press; 2015.
                    <jats:ext-link xmlns:xlink="http://www.w3.org/1999/xlink" xlink:href="https://doi.org/10.1093/acprof:oso/9780198715672.24001.0001" ext-link-type="uri">https://doi.org/10.1093/acprof:oso/9780198715672.24001.0001</jats:ext-link>
                    ) and offer three structures that can help to organize responsibility for
                    <jats:italic>decisions made</jats:italic>
                    with the assistance of AI systems. These structures are attributability, answerability, and accountability. Our insights will inform the development and design process of XAI to meet the guiding principles of responsible research and innovation as well as trustworthy AI.
                  </jats:p>}},
  author       = {{Rohlfing, Katharina J. and Alpsancar, Suzana and Schulte, Carsten}},
  booktitle    = {{Social Explainable AI}},
  isbn         = {{9789819652891}},
  pages        = {{157--177}},
  publisher    = {{Springer Nature Singapore}},
  title        = {{{Responsibilities in sXAI}}},
  doi          = {{10.1007/978-981-96-5290-7_9}},
  year         = {{2026}},
}

@inbook{65063,
  abstract     = {{<jats:title>Abstract</jats:title>
                  <jats:p>
                    This chapter critically examines how social explainable AI (sXAI) can better support AI practitioners in ensuring fairness in AI-based decision-making. We argue for a fundamental shift: Fairness should be understood not as a technical property or an information problem, but as a matter of vulnerability—focusing on the real-world impacts of AI on individuals and groups, especially those most at risk. Hereby, we call for a shift in perspective: from fair AI to
                    <jats:italic>tasking AI fairly</jats:italic>
                    . To motivate our vulnerability approach, we review the “Dutch welfare fraud scandal” (system risk indication—SyRI) and current challenges in the field of fair AI/machine learning (ML). Vulnerability of a person or members of a definable group of persons is a complex relational notion, and not a technical property of a technical system. Accordingly, we suggest several nontechnical strategies that hold the promise to compensate for the insufficiency of purely technical approaches to fairness and other ethical issues in the practical use of AI-based systems. To discuss how sXAI, due to its interactive and adaptive social character, might better fulfill this role than current XAI techniques, we provide a toy scenario for how sXAI might support the virtuous AI practitioner in an ethical inquiry. Finally, we also address challenges and limits of our approach.
                  </jats:p>}},
  author       = {{Alpsancar, Suzana and Stamboliev, Eugenia}},
  booktitle    = {{Social Explainable AI}},
  isbn         = {{9789819652891}},
  pages        = {{557--581}},
  publisher    = {{Springer Nature Singapore}},
  title        = {{{Tasking AI Fairly. How to Empower AI Practitioners With sXAI?}}},
  doi          = {{10.1007/978-981-96-5290-7_29}},
  year         = {{2026}},
}

@inbook{65064,
  abstract     = {{<jats:title>Abstract</jats:title>
                  <jats:p>XAI can minimize the risks of being manipulated and deceived by AI but in turn entails other specific risks. This also applies to sXAI, and the specifically social character of sXAI harbors particular risks that designers and developers should be aware of. In this chapter, we shall discuss the potential opportunities and risks of sXAI. We see a particularly positive potential in the social character of sXAI, which lies in the fact that skillful users, including those with “healthy distrust,” can use the adaptivity of sXAI to produce an explanation that is actually relevant and adequate for them. However, this requires a high level of skills on the part of the user and is thus in contrast to the general promise of efficiency in the use of AI. A potential risk of XAI is that it can be (even more) persuasive, as the interactive involvement and the anthropomorphism strengthen a trustworthy appearance/performance (independent of the adequacy of the sXAI performance).</jats:p>}},
  author       = {{Alpsancar, Suzana and Klenk, Michael}},
  booktitle    = {{Social Explainable AI}},
  isbn         = {{9789819652891}},
  pages        = {{583--616}},
  publisher    = {{Springer Nature Singapore}},
  title        = {{{The Risk of Manipulation and Deception in sXAI}}},
  doi          = {{10.1007/978-981-96-5290-7_30}},
  year         = {{2026}},
}

@inbook{62709,
  author       = {{Reijers, Wessel and Alpsancar, Suzana}},
  booktitle    = {{Social explainable AI. Communications of NII Shonan Meetings}},
  editor       = {{Rohlfing, Katharina J. and Främling, Kary and Lim, Brian and Alpsancar, Suzana and Thommes, Kirsten}},
  pages        = {{179--195}},
  publisher    = {{Springer}},
  title        = {{{Values and Norms in sXAI}}},
  year         = {{2026}},
}

@book{65065,
  abstract     = {{<jats:title>Abstract</jats:title>
                  <jats:p>This introduction sets the stage for the present book. Whereas research in eXplainable AI (XAI) is motivated by societal changes and values, technology development largely ignores social aspects. This book aims to address this research gap with a systematic and comprehensive social view on explainable AI. Besides introducing many relevant concepts, the book offers first access to their possible implementation, thus advancing the development of more social XAI. The introduction starts by connecting the topic to the general research field of XAI. The second part defines the novel approach of social eXplainable AI (sXAI) along the three characteristics of social interaction such as patternedness, incrementality, and multimodality. Finally, the third part explains the structure followed by each chapter. The book offers insights not only for readers who work on technology development but also for those working in sociotechnical fields. Addressing an interdisciplinary readership, the book is an invitation for more exchange and further development of the sXAI field.</jats:p>}},
  editor       = {{Rohlfing, Katharina J. and Främling, Kary and Lim, Brian and Alpsancar, Suzana and Thommes, Kirsten}},
  isbn         = {{9789819652891}},
  publisher    = {{Springer Nature Singapore}},
  title        = {{{Social Explainable AI}}},
  doi          = {{10.1007/978-981-96-5290-7_1}},
  year         = {{2026}},
}

@inbook{61517,
  author       = {{Alpsancar, Suzana}},
  booktitle    = {{Algorithmische Wissenskulturen. Der Einfluss des Computers auf die Wissenschaftsentwicklung}},
  editor       = {{Hashagen, Ulf and Seising, Rudolf}},
  isbn         = {{9783658355593}},
  issn         = {{2730-7425}},
  pages        = {{327--365}},
  publisher    = {{Springer}},
  title        = {{{Algorithmische Kulturen des Pflanzensammelns? Das Beispiel der Computerisierung des Botanischen Gartens und Botanischen Museums Berlin}}},
  doi          = {{10.1007/978-3-658-35560-9_14}},
  year         = {{2025}},
}

@unpublished{59917,
  abstract     = {{Under the slogan of trustworthy AI, much of contemporary AI research is focused on designing AI systems and usage practices that inspire human trust and, thus, enhance adoption of AI systems. However, a person affected by an AI system may not be convinced by AI system design alone---neither should they, if the AI system is embedded in a social context that gives good reason to believe that it is used in tension with a person’s interest. In such cases, distrust in the system may be justified and necessary to build meaningful trust in the first place. We propose the term \emph{healthy distrust} to describe such a justified, careful stance towards certain AI usage practices. We investigate prior notions of trust and distrust in computer science, sociology, history, psychology, and philosophy, outline a remaining gap that healthy distrust might fill and conceptualize healthy distrust as a crucial part for AI usage that respects human autonomy.}},
  author       = {{Paaßen, Benjamin and Alpsancar, Suzana and Matzner, Tobias and Scharlau, Ingrid}},
  booktitle    = {{arXiv}},
  note         = {{Preprint}},
  title        = {{{Healthy Distrust in AI systems}}},
  year         = {{2025}},
}

@article{57531,
  author       = {{Alpsancar, Suzana and Buhl, Heike M. and Matzner, Tobias and Scharlau, Ingrid}},
  journal      = {{AI and Ethics}},
  pages        = {{3015--3033}},
  publisher    = {{Springer}},
  title        = {{{Explanation needs and ethical demands: unpacking the instrumental value of XAI}}},
  doi          = {{10.1007/s43681-024-00622-3}},
  volume       = {{5}},
  year         = {{2025}},
}

@inbook{62305,
  author       = {{Reijers, Wessel and Matzner, Tobias and Alpsancar, Suzana}},
  booktitle    = {{Digital Development. Technology, Ethics and Governance}},
  editor       = {{Farina, Mirko and Yu, Xiao and Chen, Jin}},
  isbn         = {{9781003567622}},
  publisher    = {{Routledge}},
  title        = {{{Explainability and AI Governance}}},
  doi          = {{10.4324/9781003567622-22}},
  year         = {{2025}},
}

@inbook{55869,
  author       = {{Alpsancar, Suzana}},
  booktitle    = {{Philosophische Digitalisierungsforschung. Verantwortung, Verständigung, Vernunft, Macht}},
  editor       = {{Adolphi, Rainer and Alpsancar, Suzana and Hahn, Susanne and Kettner, Matthias}},
  pages        = {{55--113}},
  publisher    = {{transcript}},
  title        = {{{Warum und wozu erklärbare KI? Über die Verschiedenheit dreier paradigmatischer Zwecksetzungen}}},
  year         = {{2024}},
}

@inproceedings{57172,
  author       = {{Reijers, Wessel and Matzner, Tobias and Alpsancar, Suzana and Philippi, Martina}},
  booktitle    = {{Smart Ethics in the Digital World: Proceedings of the ETHICOMP 2024. 21st International Conference on the Ethical and Social Impacts of ICT. Universidad de La Rioja, 2024.}},
  title        = {{{AI explainability, temporality, and civic virtue}}},
  year         = {{2024}},
}

@inproceedings{56217,
  author       = {{Alpsancar, Suzana and Matzner, Tobias and Philippi, Martina}},
  booktitle    = {{Smart Ethics in the Digital World: Proceedings of the ETHICOMP 2024. 21st International Conference on the Ethical and Social Impacts of ICT}},
  pages        = {{31--35}},
  publisher    = {{Universidad de La Rioja}},
  title        = {{{Unpacking the purposes of explainable AI}}},
  year         = {{2024}},
}

@book{58692,
  editor       = {{Alpsancar, Suzana}},
  isbn         = {{978-3-7560-1830-7}},
  publisher    = {{Nomos}},
  title        = {{{Der Sog des Neuen und der Schock des Alten. Jahrbuch Technikphilosophie 2024}}},
  volume       = {{10. Jahrgang}},
  year         = {{2024}},
}

@book{48365,
  abstract     = {{<p>With the focus topic “The Human Factor”, the <italic>Jahrbuch Technikphilosophie 2023 </italic>aims to investigate on the one hand the manifold arrangements of the deficiency in the technical field (and especially “new” technologies): How do machine worlds, user interfaces, implementation strategies, or even entire large-scale technological ecosystems model, compensate, and even parody “the” human being – that is, “their” version of ourselves? How does technology discriminate? How does it educate? To what extent can it “reduce” the human? On the other hand, it is necessary to take a new look at what “human” actually means in precisely this context, and to reexamine anthropology as part of theories of technology and discourses on technology. Criticism of technology must therefore also be criticism of man.

<bold>With contributions by</bold>
Fabian Anicker, Petra Gehring, Axel Gelfert, Martina Heßler, Andreas Kaminski, Ruth Karl, Katerina Krtilova, Joachim Landkammer, Kevin Liggieri, Felix Maschewski, Nicola Mößner, Anna-Verena Nosthoff, Felix Reda, Jean Paul Sartre, Björn Schembera, Stefan Schöberlein, Marcel Siegler, Florian Sprenger and Martin Warnke.</p>}},
  editor       = {{Alpsancar, Suzana and Friedrich, Alexander and Gehring, Petra and Hubig, Christoph and Kaminski, Andreas and Nordmann, Alfred}},
  isbn         = {{9783748941767}},
  publisher    = {{Nomos}},
  title        = {{{Faktor Mensch. Jahrbuch Technikphilosophie 2023}}},
  doi          = {{10.5771/9783748941767}},
  year         = {{2023}},
}

@inproceedings{44853,
  author       = {{Alpsancar, Suzana}},
  booktitle    = {{International Conference on Computer Ethics 2023}},
  location     = {{Chicago, Illinois}},
  number       = {{1}},
  pages        = {{1--17}},
  title        = {{{What is AI Ethics? Ethics as means of self-regulation and the need for critical reflection}}},
  volume       = {{1}},
  year         = {{2023}},
}

@article{41874,
  author       = {{Pfeiffer, Jella and Gutschow, Julia and Haas, Christian and Möslein, Florian and Maspfuhl, Oliver and Borgers, Frederik and Alpsancar, Suzana}},
  issn         = {{2363-7005}},
  journal      = {{Business \& Information Systems Engineering}},
  keywords     = {{Information Systems}},
  publisher    = {{Springer Science and Business Media LLC}},
  title        = {{{Algorithmic Fairness in AI}}},
  doi          = {{10.1007/s12599-023-00787-x}},
  year         = {{2023}},
}

@inbook{34619,
  author       = {{Alpsancar, Suzana}},
  booktitle    = {{Technikfolgenabschätzung. Handbuch für Wissenschaft und Praxis}},
  editor       = {{Böschen, Stefan and Grunwald, Armin and Krings, Bettina-Johanna and Rösch, Christine}},
  isbn         = {{978-3-8487-6070-1}},
  pages        = {{229--239}},
  publisher    = {{Nomos}},
  title        = {{{TA und Zeitdiagnosen. TA als Zeitdiagnose?}}},
  doi          = {{10.5771/9783748901990}},
  year         = {{2021}},
}

@inbook{34567,
  author       = {{Alpsancar, Suzana}},
  booktitle    = {{Autonome Autos – Die Zukunft des Verkehrs und die Dispositive der Mobilität}},
  editor       = {{Sprenger, Florian}},
  isbn         = {{9783837650242}},
  issn         = {{2702-8852}},
  pages        = {{373--397}},
  publisher    = {{transcript Verlag}},
  title        = {{{„Vom Fahr-Zeug zum Fahr-Ding. Ein Heideggerianischer Kommentar zum autonomen Automobil“}}},
  doi          = {{10.14361/9783839450246}},
  year         = {{2021}},
}

@misc{35447,
  author       = {{Alpsancar, Suzana}},
  booktitle    = {{Mensch-Maschine-Interaktion: Handbuch zu Geschichte – Kultur – Ethik}},
  editor       = {{Liggieri, Kevin and Müller, Oliver}},
  isbn         = {{978-3-476-05604-7}},
  pages        = {{291--293}},
  publisher    = {{J.B. Metzler}},
  title        = {{{Organprojektion}}},
  doi          = {{10.1007/978-3-476-05604-7_53}},
  year         = {{2019}},
}

@misc{35445,
  author       = {{Alpsancar, Suzana}},
  booktitle    = {{Mensch-Maschine-Interaktion: Handbuch zu Geschichte – Kultur – Ethik}},
  editor       = {{Liggieri, Kevin and Müller, Oliver}},
  isbn         = {{978-3-476-05604-7}},
  pages        = {{244--246}},
  publisher    = {{J.B. Metzler}},
  title        = {{{Computation}}},
  doi          = {{10.1007/978-3-476-05604-7_37}},
  year         = {{2019}},
}

