% JATS markup stripped from the auto-exported abstract (italics kept as \emph).
% Editor list added from the sibling chapter entries of the same volume
% (identical booktitle, ISBN 9789819652891, and DOI prefix) -- verify.
@inbook{65063,
  abstract     = {{This chapter critically examines how social explainable AI (sXAI) can better support AI practitioners in ensuring fairness in AI-based decision-making. We argue for a fundamental shift: Fairness should be understood not as a technical property or an information problem, but as a matter of vulnerability—focusing on the real-world impacts of AI on individuals and groups, especially those most at risk. Hereby, we call for a shift in perspective: from fair AI to \emph{tasking AI fairly}. To motivate our vulnerability approach, we review the “Dutch welfare fraud scandal” (system risk indication—SyRI) and current challenges in the field of fair AI/machine learning (ML). Vulnerability of a person or members of a definable group of persons is a complex relational notion, and not a technical property of a technical system. Accordingly, we suggest several nontechnical strategies that hold the promise to compensate for the insufficiency of purely technical approaches to fairness and other ethical issues in the practical use of AI-based systems. To discuss how sXAI, due to its interactive and adaptive social character, might better fulfill this role than current XAI techniques, we provide a toy scenario for how sXAI might support the virtuous AI practitioner in an ethical inquiry. Finally, we also address challenges and limits of our approach.}},
  author       = {{Alpsancar, Suzana and Stamboliev, Eugenia}},
  booktitle    = {{Social Explainable AI}},
  editor       = {{Rohlfing, Katharina J. and Främling, Kary and Alpsancar, Suzana and Thommes, Kirsten and Lim, Brian Y.}},
  isbn         = {{9789819652891}},
  pages        = {{557--581}},
  publisher    = {{Springer Nature Singapore}},
  title        = {{{Tasking AI Fairly. How to Empower AI Practitioners With sXAI?}}},
  doi          = {{10.1007/978-981-96-5290-7_29}},
  year         = {{2026}},
}

% JATS markup stripped from the auto-exported abstract. Editor list added from
% the sibling chapter entries of the same volume (identical booktitle,
% ISBN 9789819652891, and DOI prefix) -- verify.
@inbook{65064,
  abstract     = {{XAI can minimize the risks of being manipulated and deceived by AI but in turn entails other specific risks. This also applies to sXAI, and the specifically social character of sXAI harbors particular risks that designers and developers should be aware of. In this chapter, we shall discuss the potential opportunities and risks of sXAI. We see a particularly positive potential in the social character of sXAI, which lies in the fact that skillful users, including those with “healthy distrust,” can use the adaptivity of sXAI to produce an explanation that is actually relevant and adequate for them. However, this requires a high level of skills on the part of the user and is thus in contrast to the general promise of efficiency in the use of AI. A potential risk of XAI is that it can be (even more) persuasive, as the interactive involvement and the anthropomorphism strengthen a trustworthy appearance/performance (independent of the adequacy of the sXAI performance).}},
  author       = {{Alpsancar, Suzana and Klenk, Michael}},
  booktitle    = {{Social Explainable AI}},
  editor       = {{Rohlfing, Katharina J. and Främling, Kary and Alpsancar, Suzana and Thommes, Kirsten and Lim, Brian Y.}},
  isbn         = {{9789819652891}},
  pages        = {{583--616}},
  publisher    = {{Springer Nature Singapore}},
  title        = {{{The Risk of Manipulation and Deception in sXAI}}},
  doi          = {{10.1007/978-981-96-5290-7_30}},
  year         = {{2026}},
}

% Series name moved out of booktitle into its own field; editor name forms
% normalized ("Katharina J.", "Brian Y.") to match the other entries for this
% volume. Editor order kept as in the original entry.
@inbook{62709,
  author       = {{Reijers, Wessel and Alpsancar, Suzana}},
  booktitle    = {{Social Explainable AI}},
  editor       = {{Rohlfing, Katharina J. and Främling, Kary and Lim, Brian Y. and Alpsancar, Suzana and Thommes, Kirsten}},
  pages        = {{179--195}},
  publisher    = {{Springer}},
  series       = {{Communications of NII Shonan Meetings}},
  title        = {{{Values and Norms in sXAI}}},
  year         = {{2026}},
}

% Edited yearbook: Jahrbuch Technikphilosophie, vol. 10 (2025), published by
% Nomos; entry lists editors only (no author), as appropriate for @book.
@book{57167,
  editor       = {{Alpsancar, Suzana and Friedrich, Alexander and Gehring, Petra and Kaminski, Andreas and Nordmann, Alfred}},
  isbn         = {{978-3-7560-1830-7}},
  publisher    = {{Nomos}},
  title        = {{{Jahrbuch Technikphilosophie. Täuschung und Illusion. 10. Jahrgang 2025}}},
  year         = {{2026}},
}

% JATS markup stripped from the abstract. NOTE(review): the abstract text is the
% introduction chapter's, and the original DOI carried the chapter suffix "_1";
% since this entry describes the whole book, the DOI is normalized to the
% book-level identifier -- verify against the publisher record. "Lim, Brian"
% normalized to "Lim, Brian Y." as in the chapter entries.
@book{65065,
  abstract     = {{This introduction sets the stage for the present book. Whereas research in eXplainable AI (XAI) is motivated by societal changes and values, technology development largely ignores social aspects. This book aims to address this research gap with a systematic and comprehensive social view on explainable AI. Besides introducing many relevant concepts, the book offers first access to their possible implementation, thus advancing the development of more social XAI. The introduction starts by connecting the topic to the general research field of XAI. The second part defines the novel approach of social eXplainable AI (sXAI) along the three characteristics of social interaction such as patternedness, incrementality, and multimodality. Finally, the third part explains the structure followed by each chapter. The book offers insights not only for readers who work on technology development but also for those working in sociotechnical fields. Addressing an interdisciplinary readership, the book is an invitation for more exchange and further development of the sXAI field.}},
  editor       = {{Rohlfing, Katharina J. and Främling, Kary and Lim, Brian Y. and Alpsancar, Suzana and Thommes, Kirsten}},
  isbn         = {{9789819652891}},
  publisher    = {{Springer Nature Singapore}},
  title        = {{{Social Explainable AI}}},
  doi          = {{10.1007/978-981-96-5290-7}},
  year         = {{2026}},
}

% Chapter (pp. 227--245) in Rohlfing et al. (eds.), "Social Explainable AI",
% Springer, 2026; DOI suffix "_12" places it within 10.1007/978-981-96-5290-7.
@inbook{61323,
  author       = {{Wrede, Britta and Buschmeier, Hendrik and Rohlfing, Katharina Justine and Booshehri, Meisam and Grimminger, Angela}},
  booktitle    = {{Social Explainable AI}},
  editor       = {{Rohlfing, Katharina J. and Främling, Kary and Alpsancar, Suzana and Thommes, Kirsten and Lim, Brian Y.}},
  pages        = {{227--245}},
  publisher    = {{Springer}},
  title        = {{{Incremental communication}}},
  doi          = {{10.1007/978-981-96-5290-7_12}},
  year         = {{2026}},
}

% Chapter (pp. 351--365) in Rohlfing et al. (eds.), "Social Explainable AI",
% Springer, 2026; consistent with the other chapter entries of this volume.
@inbook{61321,
  author       = {{Grimminger, Angela and Buschmeier, Hendrik}},
  booktitle    = {{Social Explainable AI}},
  editor       = {{Rohlfing, Katharina J. and Främling, Kary and Alpsancar, Suzana and Thommes, Kirsten and Lim, Brian Y.}},
  pages        = {{351--365}},
  publisher    = {{Springer}},
  title        = {{{Theoretical aspects of multimodal processing}}},
  doi          = {{10.1007/978-981-96-5290-7_18}},
  year         = {{2026}},
}

% JATS markup (<jats:title>, <jats:p>) stripped from the auto-exported abstract;
% abstract wording itself left untouched (authors' text).
@article{65066,
  abstract     = {{We investigate whether the recently approved reforms of the apportionment of parliamentary seats to parties in the German Bundestag affects the parties’ political influence measured by power indices. We find that under neither reform the underlying simple game, which describes the possibilities to form governments, remains unchanged and as a result the Shapley-Shubik and the Banzhaf index are unaltered. As a consequence, the major change resulting from the reforms is the reduction of the Bundestag’s size to 630 seats.}},
  author       = {{Duman, Papatya and Haake, Claus-Jochen}},
  issn         = {{0948-5139}},
  journal      = {{Review of Economics}},
  keywords     = {{Bundestag reform, Banzhaf power index, Shapley-Shubik power index}},
  number       = {{3}},
  pages        = {{241--270}},
  publisher    = {{Walter de Gruyter GmbH}},
  title        = {{{A Note on the Size Reduction Reform in the German Parliament: A Game Theoretic Analysis of Power Indices}}},
  doi          = {{10.1515/roe-2024-0048}},
  volume       = {{76}},
  year         = {{2026}},
}

% Last editor name had given/family parts swapped ("Sabine, Fechner");
% corrected to the "Last, First" form used for every other name in this file.
@inbook{64624,
  author       = {{Lehberger, Regine}},
  booktitle    = {{Handlungsorientierung in der Ausbildung von Fachkräften und pädagogischen Fachkräften. Konzeptionen und Forschungsperspektiven}},
  editor       = {{Vogelsang, Christoph and Grotegut, Lea and Bruns, Julia and Riese, Josef and Fechner, Sabine}},
  pages        = {{185--193}},
  publisher    = {{Waxmann}},
  title        = {{{Reflexion von individuellen Selbstregulationsfähigkeiten zur Professionalisierung im bildungswissenschaftlichen Begleitseminar des Praxissemesters}}},
  volume       = {{2}},
  year         = {{2026}},
}

% Chapter (pp. 367--390) in Rohlfing et al. (eds.), "Social Explainable AI",
% Springer, 2026; consistent with the other chapter entries of this volume.
@inbook{61322,
  author       = {{Lazarov, Stefan Teodorov and Tchappi, Igor and Grimminger, Angela}},
  booktitle    = {{Social Explainable AI}},
  editor       = {{Rohlfing, Katharina J. and Främling, Kary and Alpsancar, Suzana and Thommes, Kirsten and Lim, Brian Y.}},
  pages        = {{367--390}},
  publisher    = {{Springer}},
  title        = {{{Characteristics of nonverbal behavior}}},
  doi          = {{10.1007/978-981-96-5290-7_19}},
  year         = {{2026}},
}

% Chapter (pp. 433--446) in Rohlfing et al. (eds.), "Social Explainable AI",
% Springer, 2026; consistent with the other chapter entries of this volume.
@inbook{61324,
  author       = {{Wagner, Petra and Kopp, Stefan}},
  booktitle    = {{Social Explainable AI}},
  editor       = {{Rohlfing, Katharina J. and Främling, Kary and Alpsancar, Suzana and Thommes, Kirsten and Lim, Brian Y.}},
  pages        = {{433--446}},
  publisher    = {{Springer}},
  title        = {{{Timing and synchronization of multimodal signals in explanations}}},
  doi          = {{10.1007/978-981-96-5290-7_22}},
  year         = {{2026}},
}

% Editor "Rohlfing, Katharina" normalized to "Katharina J." as in the sibling
% entries. NOTE(review): editor order here differs from the other chapter
% entries for this volume, and the pages field is missing -- verify against
% the published book.
@inbook{61112,
  author       = {{Rohlfing, Katharina J. and Vollmer, Anna-Lisa and Grimminger, Angela}},
  booktitle    = {{Social Explainable AI}},
  editor       = {{Rohlfing, Katharina J. and Främling, Kary and Thommes, Kirsten and Alpsancar, Suzana and Lim, Brian Y.}},
  publisher    = {{Springer}},
  title        = {{{Practices: How to establish an explaining practice}}},
  doi          = {{10.1007/978-981-96-5290-7_5}},
  year         = {{2026}},
}

% Chapter (pp. 19--38) in Rohlfing et al. (eds.), "Social Explainable AI",
% Springer, 2026; carries the volume ISBN 9789819652891 like the other chapters.
@inbook{65069,
  author       = {{Främling, Kary and Alami, Rachid and Hulstijn, Joris and Tchappi, Igor and Grimminger, Angela and Wrede, Britta and Buschmeier, Hendrik and Kubler, Sylvain}},
  booktitle    = {{Social Explainable AI}},
  editor       = {{Rohlfing, Katharina J. and Främling, Kary and Alpsancar, Suzana and Thommes, Kirsten and Lim, Brian Y.}},
  isbn         = {{9789819652891}},
  pages        = {{19--38}},
  publisher    = {{Springer}},
  title        = {{{Scenarios of Social Explainable AI in practice}}},
  doi          = {{10.1007/978-981-96-5290-7_2}},
  year         = {{2026}},
}

% Doubled space removed from the first editor name ("Constantine  and").
% NOTE(review): pages and DOI are missing, plausibly because the 2026
% proceedings are not yet published -- add when available.
@inproceedings{63031,
  author       = {{Menne, Anna Lena and Schulz, Christian}},
  booktitle    = {{HCI International 2026 Posters: 28th International Conference on Human-Computer Interaction, HCI 2026, Montreal, Canada, July 26-31, 2026, Proceedings}},
  editor       = {{Stephanidis, Constantine and Antona, Margherita and Ntoa, Stavroula and Salvendy, Gavriel}},
  location     = {{Montreal}},
  publisher    = {{Springer International Publishing}},
  title        = {{{Social Context in Human-AI Interaction (HAI): Towards a Theoretical Framework Based on Multi-Perspectival Imaginaries}}},
  year         = {{2026}},
}

% Doubled spaces removed from two editor names ("Jennifer  and",
% "Thorben  and").
@inbook{59747,
  author       = {{Schulz, Christian}},
  booktitle    = {{Tausend Plattformen: Plattformforschung nach dem Digital Services Act}},
  editor       = {{Eickelmann, Jennifer and Mämecke, Thorben and Ruschemeier, Hannah and Waldmann, Maximilian}},
  publisher    = {{Büchner Verlag}},
  title        = {{{Das Imaginäre der Plattformen: Soziale Medien als Infrastrukturen der Anerkennung}}},
  year         = {{2026}},
}

% Special-issue title moved out of the journal field into note; journal name
% corrected to "RESET (Recherches en sciences sociales sur Internet)" (the
% original "Recherches sciences sociale sur internet" was garbled) -- verify
% against the journal's masthead.
@article{64789,
  author       = {{Beer, Fabian and Schulz, Christian}},
  journal      = {{RESET (Recherches en sciences sociales sur Internet)}},
  note         = {{Special issue: Towards New Social and Historical Studies of Artificial Intelligence}},
  publisher    = {{Open Edition Journals}},
  title        = {{{AI has never been “inherently interpretable”: On a paradoxical origin of eXplainable AI (XAI)}}},
  year         = {{2026}},
}

% The arXiv identifier was stored in booktitle, which is not a valid field for
% @unpublished; moved to eprint/eprinttype and mirrored in the note field that
% @unpublished requires. Doubled space fixed in the abstract ("The  $p$-mass").
@unpublished{65073,
  abstract     = {{We study the large-time behavior of the continuous-time heat kernel and of solutions to the heat equation on homogeneous trees. First, we derive sharp asymptotic formulas for the heat kernel as $t\to\infty$. Second, using them, we show that solutions with initial data in weighted $\ell^1$ classes, asymptotically factorize in $\ell^p$ norms, $p\in[1,\infty]$, as the product of the heat kernel, times a $p$-mass function, dependent on the initial condition and $p$. The $p$-mass function is described in terms of boundary averages associated with Busemann functions for $p<2$, while for $p\ge 2$, it is expressed through convolution with the ground spherical function. For comparison, the case of the integers shows that a single constant mass determines the asymptotics of solutions to the heat equation for all $p$, emphasizing the influence of the graph geometry on heat diffusion.}},
  author       = {{Papageorgiou, Efthymia}},
  eprint       = {{2603.11232}},
  eprinttype   = {{arXiv}},
  note         = {{Preprint, arXiv:2603.11232}},
  title        = {{{Long-time asymptotics for the heat kernel and for heat equation solutions on homogeneous trees}}},
  year         = {{2026}},
}

% Trailing space removed inside the editor field ("Tuschling, Anna ").
@inbook{55598,
  author       = {{Schulz, Christian}},
  booktitle    = {{Handbuch Social Media: Geschichte – Kultur – Ästhetik}},
  editor       = {{Dörre, Robert and Tuschling, Anna}},
  publisher    = {{Metzler Verlag}},
  title        = {{{Feeds. Ein zentrales Strukturprinzip sozialer Medien}}},
  year         = {{2026}},
}

% JATS <jats:p> wrapper stripped from the auto-exported abstract; abstract
% wording itself left untouched (authors' text).
@article{65082,
  abstract     = {{Encoding information in molecular arrangements on DNA origami nanostructures (DONs) provides the basis for novel concepts in molecular data storage and computing. To preserve their integrity over long timescales, the information‐carrying DONs are often stored in a frozen state. Here, we investigate the effect of repeated freeze–thaw (F/T) cycles on the structural and functional integrity of DONs carrying biotin (Bt) modifications. Streptavidin (SAv) binding is used to visualize the stored information by atomic force microscopy (AFM) before and after 40 F/T cycles. Two strategies are compared by F/T cycling of (I) SAv‐bound DONs and (II) SAv‐free DONs that are exposed to SAv directly before AFM imaging. Our results reveal that while the DONs retain their overall shape, F/T cycling induces a small amount of damage, leading to slightly reduced SAv binding. Adding glycerol at mM concentrations efficiently protects the DONs and restores the original SAv binding yields. Nevertheless, SAv exposure after F/T cycling leads to slightly higher and more consistent SAv binding yields and a lower background of nonspecifically adsorbed SAv compared to Strategy I. This makes information readout by AFM more efficient and renders Strategy II more convenient for long‐term storage of information‐carrying DONs with repeated information readout.}},
  author       = {{Li, Xinyang and Rabbe, Lukas and Linneweber, Jacqueline and Grundmeier, Guido and Keller, Adrian Clemens}},
  issn         = {{2628-9725}},
  journal      = {{Chemistry–Methods}},
  number       = {{3}},
  publisher    = {{Wiley}},
  title        = {{{Stability of Information‐Carrying DNA Origami Nanostructures During Repeated Freeze–Thaw Cycles}}},
  doi          = {{10.1002/cmtd.202500161}},
  volume       = {{6}},
  year         = {{2026}},
}

% DOI stored bare (resolver prefix "https://doi.org/" removed), matching the
% convention used by every other entry in this file.
@article{65081,
  author       = {{Schwede, Jana}},
  journal      = {{berufsbildung}},
  number       = {{1}},
  pages        = {{44--46}},
  publisher    = {{wbv}},
  title        = {{{Drei Lernorte, (k)ein Zusammenwirken? Lernortkooperation im Spannungsfeld von Anspruch und Wirklichkeit}}},
  doi          = {{10.3278/BB2601W}},
  volume       = {{80}},
  year         = {{2026}},
}

