@article{63611,
  abstract     = {When humans interact with artificial intelligence (AI), one desideratum is appropriate trust. Typically, appropriate trust encompasses that humans trust AI except for instances in which they either explicitly notice AI errors or are suspicious that errors could be present. So far, appropriate trust or related notions have mainly been investigated by assessing trust and reliance. In this contribution, we argue that these assessments are insufficient to measure the complex aim of appropriate trust and the related notion of healthy distrust. We introduce and test the perspective of covert visual attention as an additional indicator for appropriate trust and draw conceptual connections to the notion of healthy distrust. To test the validity of our conceptualization, we formalize visual attention using the Theory of Visual Attention and measure its properties that are potentially relevant to appropriate trust and healthy distrust in an image classification task. Based on temporal-order judgment performance, we estimate participants' attentional capacity and attentional weight toward correct and incorrect mock-up AI classifications. We observe that misclassifications reduce attentional capacity compared to correct classifications. However, our results do not indicate that this reduction is beneficial for a subsequent judgment of the classifications. The attentional weighting is not affected by the classifications' correctness but by the difficulty of categorizing the stimuli themselves. We discuss these results, their implications, and the limited potential for using visual attention as an indicator of appropriate trust and healthy distrust.},
  author       = {Peters, Tobias Martin and Biermeier, Kai and Scharlau, Ingrid},
  issn         = {1664-1078},
  journal      = {Frontiers in Psychology},
  keywords     = {appropriate trust, healthy distrust, visual attention, Theory of Visual Attention, human-AI interaction, Bayesian cognitive model, image classification},
  publisher    = {Frontiers Media SA},
  title        = {Assessing healthy distrust in human-{AI} interaction: interpreting changes in visual attention},
  doi          = {10.3389/fpsyg.2025.1694367},
  volume       = {16},
  year         = {2026},
}

@book{63686,
  abstract     = {Translated from German, The Handbook of Qualitative and Quantitative Content Analysis is a comprehensive handbook which offers an application-orientated introduction to qualitative and quantitative content analysis methods.

The book provides explanations for beginners from bachelor level onwards on how to select an appropriate qualitative or quantitative content analysis method and how to use the chosen method(s) depending on research interest and amount of data. Part 1 defines the basics of qualitative and quantitative content analysis and empirical research, including research quality conventions and how to do interpretation; Part 2 is a practical guide to classical qualitative content analysis and semi-automated quantitative content analysis; and Part 3 introduces Python alongside automated techniques such as correspondence analysis, semantic network analysis, sentiment analysis, and topic modelling using generative and deep learning algorithms. Each of these sections are enriched with extensive examples and cover a range of software applications, including AntConc, MAXQDA, Python, and VosViewer.

This is the ideal resource for anyone interested in content analysis research methods across the social sciences, humanities, and data sciences.},
  author       = {Schneijderberg, Christian and Wieczorek, Oliver and Steinhardt, Isabel},
  isbn         = {9781003496397},
  publisher    = {Routledge},
  title        = {The Handbook of Qualitative and Quantitative Content Analysis},
  doi          = {10.4324/9781003496397},
  year         = {2026},
}

@inbook{63696,
  abstract     = {Das Kapitel beleuchtet, wie digitale Medien von Jugendlichen genutzt werden und wie diese Nutzung die Entwicklung von Jugendlichen beeinflusst, indem Einblicke in Nutzungsmuster, Chancen und Risiken digitaler Medien sowie in den Stand digitaler Kompetenzen gegeben werden. Zwei zentrale Sozialisationsinstanzen (Familie und Peers) werden genauer beleuchtet. Ziel ist es, pädagogische und gesellschaftliche Strategien zur Förderung digitaler Mündigkeit aufzuzeigen.},
  author       = {Kurock, Ricarda and Jungkeim, Lisa and Kuhn, Nicole},
  booktitle    = {Handbuch Entwicklungs- und Erziehungspsychologie},
  editor       = {Kracke, Bärbel and Noack, Peter},
  keywords     = {Mediennutzung, Soziale Medien, Digitale Kompetenzen, Peers, Familie},
  publisher    = {Springer},
  title        = {{Umgang mit digitalen Medien im Jugendalter}},
  year         = {2026},
}

@article{63777,
  author       = {Hallmann, Nicole and Mahs, Marie and Grant, Lloyd-Spencer and Baier, Marla Ricarda and Cerezo Alarcon, Alina Luisa and Kepper, Florian and Steinhardt, Isabel},
  journal      = {die hochschullehre},
  title        = {{Selbstgesteuertes Lernen, Partizipation und das 4K-Modell. Lehre in der Kultur der Digitalität}},
  doi          = {10.3278/HSL2602W},
  year         = {2026},
}

@book{63514,
  editor       = {Hagengruber, Ruth Edith and Wells, Aaron},
  publisher    = {Springer},
  title        = {Du Châtelet and Kant},
  year         = {2026},
}

@inbook{64178,
  author       = {Muller, Jil},
  booktitle    = {Automata, Cyborgs, and Mutants},
  editor       = {Muller, Jil},
  publisher    = {Palgrave Macmillan},
  title        = {Introduction: {Automata, Cyborgs, and Mutants}: Eccentric Bodies from {Humanism} to {Transhumanism}},
  doi          = {10.1007/978-3-031-93201-4_1},
  year         = {2026},
}

@inbook{64179,
  author       = {Muller, Jil},
  booktitle    = {Automata, Cyborgs, and Mutants},
  editor       = {Muller, Jil},
  publisher    = {Palgrave Macmillan},
  title        = {{Descartes} on Clocks and Automata},
  doi          = {10.1007/978-3-031-93201-4_5},
  year         = {2026},
}

@book{64177,
  editor       = {Muller, Jil},
  pages        = {368},
  publisher    = {Palgrave Macmillan},
  title        = {Automata, Cyborgs, and Mutants},
  doi          = {10.1007/978-3-031-93201-4},
  year         = {2026},
}

@inbook{64230,
  author       = {Zick, Andreas and Diekmann, Isabell},
  booktitle    = {The Sage Handbook of Psychological Perspectives on Diversity, Equity, and Inclusion},
  editor       = {Esses, Victoria M. and Dovidio, John F. and Jetten, Jolanda and Sekaquaptewa, Denise and West, Keon},
  isbn         = {9781529680836},
  publisher    = {SAGE Publications Ltd},
  title        = {{Anti-Muslim} Racism: Facets, Roots, and Consequences},
  year         = {2026},
}

@inproceedings{64827,
  author       = {Porwol, Philip Fabian and Körber, Miriam and Kern, Friederike and Schulte, Carsten and Scharlau, Ingrid},
  booktitle    = {Proceedings of the 3rd TRR 318 Conference: Contextualizing Explanations},
  editor       = {Cimiano, Philipp and Paaßen, Benjamin and Vollmer, Anna-Lisa},
  location     = {Bielefeld},
  publisher    = {Bielefeld University Press},
  title        = {Framing what and how to think: Lay people’s metaphors for algorithms},
  doi          = {10.64136/ubio9074},
  year         = {2026},
}

@inproceedings{64872,
  author       = {Buhl, Heike M. and Fisher, Josephine Beryl and Rohlfing, Katharina J.},
  booktitle    = {Proceedings of the 3rd TRR 318 Conference: Contextualizing Explanations},
  editor       = {Cimiano, Philipp and Paaßen, Benjamin and Vollmer, Anna-Lisa},
  publisher    = {Bielefeld University Press},
  title        = {Cognitive and Interactive Adaptivity to the Explainee in an Explanatory Dialogue: An Experimental Study},
  doi          = {10.64136/gumb4700},
  year         = {2026},
}

@book{63515,
  editor       = {Hagengruber, Ruth Edith},
  publisher    = {Springer},
  title        = {The History of Women Philosophers. Festschrift in Honour of Mary Ellen Waithe},
  volume       = {26},
  year         = {2026},
}

@article{63516,
  author       = {Hagengruber, Ruth Edith},
  journal      = {Diogenes},
  title        = {{1000 Places in 1 World. How Women Philosophers are changing the way we see the world and understand its history}},
  year         = {2026},
}

@article{63517,
  author       = {Hagengruber, Ruth Edith},
  journal      = {Agora 42},
  title        = {{Cyborgs um uns herum. Der Traum vom neuen Menschen zwischen Macht, Technik und gesellschaftlichem Umbruch}},
  year         = {2026},
}

@book{60934,
  author       = {Du Châtelet, Émilie and Hagengruber, Ruth Edith},
  publisher    = {De Gruyter},
  title        = {Naturlehre},
  year         = {2026},
}

@inbook{65061,
  abstract     = {One of the purposes for which XAI is often brought into play is to enable a user to act responsibly. However, responsibility is a complex normative and social phenomenon that we unfold in this chapter. We consider that the classical concepts of agency and responsibility do not fully capture what is needed for meaningful collaboration between human users and XAI. Advocating the perspective of sXAI, we argue that the growing adaptivity of AI systems will result in sXAI being considered as partners. Both partners adopt particular (dialogical) roles within a collaborative process and take responsibility for them. We expect that these roles lead to reactive attitudes toward the sXAI on the side of the human partners that make these roles relational. They resemble those reactive attitudes that we hold toward other human agents. For agents to exercise their responsibility, they need to possess agential capacities to fulfill their role with respect to the structure of a social interaction. Hence, sXAI can be expected to act responsibly. But because of XAI’s limited normative capacities, it might rather act as a marginal agent. We refer to marginal agents and show they can be scaffolded with regard to their agential capacities and their knowledge about the structure of a social interaction. The structure links the actions of the partners to each other in terms of a set of stimuli and responses to it in pursuit of a particular goal. Hence, it is important to differentiate between the different goals that a structure can impose for exercising responsibility. Therefore, we follow (Responsibility from the margins. Oxford University Press; 2015. https://doi.org/10.1093/acprof:oso/9780198715672.24001.0001) and offer three structures that can help to organize responsibility for \emph{decisions made} with the assistance of AI systems. These structures are attributability, answerability, and accountability. Our insights will inform the development and design process of XAI to meet the guiding principles of responsible research and innovation as well as trustworthy AI.},
  author       = {Rohlfing, Katharina J. and Alpsancar, Suzana and Schulte, Carsten},
  booktitle    = {Social Explainable AI},
  isbn         = {9789819652891},
  pages        = {157--177},
  publisher    = {Springer Nature Singapore},
  title        = {Responsibilities in {sXAI}},
  doi          = {10.1007/978-981-96-5290-7_9},
  year         = {2026},
}

@inbook{65063,
  abstract     = {This chapter critically examines how social explainable AI (sXAI) can better support AI practitioners in ensuring fairness in AI-based decision-making. We argue for a fundamental shift: Fairness should be understood not as a technical property or an information problem, but as a matter of vulnerability—focusing on the real-world impacts of AI on individuals and groups, especially those most at risk. Hereby, we call for a shift in perspective: from fair AI to \emph{tasking AI fairly}. To motivate our vulnerability approach, we review the “Dutch welfare fraud scandal” (system risk indication—SyRI) and current challenges in the field of fair AI/machine learning (ML). Vulnerability of a person or members of a definable group of persons is a complex relational notion, and not a technical property of a technical system. Accordingly, we suggest several nontechnical strategies that hold the promise to compensate for the insufficiency of purely technical approaches to fairness and other ethical issues in the practical use of AI-based systems. To discuss how sXAI, due to its interactive and adaptive social character, might better fulfill this role than current XAI techniques, we provide a toy scenario for how sXAI might support the virtuous AI practitioner in an ethical inquiry. Finally, we also address challenges and limits of our approach.},
  author       = {Alpsancar, Suzana and Stamboliev, Eugenia},
  booktitle    = {Social Explainable AI},
  isbn         = {9789819652891},
  pages        = {557--581},
  publisher    = {Springer Nature Singapore},
  title        = {Tasking {AI} Fairly. How to Empower {AI} Practitioners With {sXAI}?},
  doi          = {10.1007/978-981-96-5290-7_29},
  year         = {2026},
}

@inbook{65064,
  abstract     = {XAI can minimize the risks of being manipulated and deceived by AI but in turn entails other specific risks. This also applies to sXAI, and the specifically social character of sXAI harbors particular risks that designers and developers should be aware of. In this chapter, we shall discuss the potential opportunities and risks of sXAI. We see a particularly positive potential in the social character of sXAI, which lies in the fact that skillful users, including those with “healthy distrust,” can use the adaptivity of sXAI to produce an explanation that is actually relevant and adequate for them. However, this requires a high level of skills on the part of the user and is thus in contrast to the general promise of efficiency in the use of AI. A potential risk of XAI is that it can be (even more) persuasive, as the interactive involvement and the anthropomorphism strengthen a trustworthy appearance/performance (independent of the adequacy of the sXAI performance).},
  author       = {Alpsancar, Suzana and Klenk, Michael},
  booktitle    = {Social Explainable AI},
  isbn         = {9789819652891},
  pages        = {583--616},
  publisher    = {Springer Nature Singapore},
  title        = {The Risk of Manipulation and Deception in {sXAI}},
  doi          = {10.1007/978-981-96-5290-7_30},
  year         = {2026},
}

@inbook{62709,
  author       = {Reijers, Wessel and Alpsancar, Suzana},
  booktitle    = {Social Explainable AI},
  series       = {Communications of NII Shonan Meetings},
  editor       = {Rohlfing, Katharina J. and Främling, Kary and Lim, Brian and Alpsancar, Suzana and Thommes, Kirsten},
  pages        = {179--195},
  publisher    = {Springer Nature Singapore},
  title        = {Values and Norms in {sXAI}},
  year         = {2026},
}

@book{65065,
  abstract     = {This introduction sets the stage for the present book. Whereas research in eXplainable AI (XAI) is motivated by societal changes and values, technology development largely ignores social aspects. This book aims to address this research gap with a systematic and comprehensive social view on explainable AI. Besides introducing many relevant concepts, the book offers first access to their possible implementation, thus advancing the development of more social XAI. The introduction starts by connecting the topic to the general research field of XAI. The second part defines the novel approach of social eXplainable AI (sXAI) along the three characteristics of social interaction such as patternedness, incrementality, and multimodality. Finally, the third part explains the structure followed by each chapter. The book offers insights not only for readers who work on technology development but also for those working in sociotechnical fields. Addressing an interdisciplinary readership, the book is an invitation for more exchange and further development of the sXAI field.},
  editor       = {Rohlfing, Katharina J. and Främling, Kary and Lim, Brian and Alpsancar, Suzana and Thommes, Kirsten},
  isbn         = {9789819652891},
  publisher    = {Springer Nature Singapore},
  title        = {Social Explainable AI},
  doi          = {10.1007/978-981-96-5290-7},
  year         = {2026},
}

