@unpublished{59917,
  abstract     = {Under the slogan of trustworthy AI, much of contemporary AI research is focused on designing AI systems and usage practices that inspire human trust and, thus, enhance adoption of AI systems. However, a person affected by an AI system may not be convinced by AI system design alone---neither should they, if the AI system is embedded in a social context that gives good reason to believe that it is used in tension with a person’s interest. In such cases, distrust in the system may be justified and necessary to build meaningful trust in the first place. We propose the term \emph{healthy distrust} to describe such a justified, careful stance towards certain AI usage practices. We investigate prior notions of trust and distrust in computer science, sociology, history, psychology, and philosophy, outline a remaining gap that healthy distrust might fill and conceptualize healthy distrust as a crucial part for AI usage that respects human autonomy.},
  author       = {Paaßen, Benjamin and Alpsancar, Suzana and Matzner, Tobias and Scharlau, Ingrid},
  title        = {Healthy Distrust in {AI} systems},
  note         = {arXiv preprint},
  year         = {2025},
}

@article{57531,
  author       = {Alpsancar, Suzana and Buhl, Heike M. and Matzner, Tobias and Scharlau, Ingrid},
  journal      = {AI and Ethics},
  pages        = {3015--3033},
  publisher    = {Springer},
  title        = {Explanation needs and ethical demands: unpacking the instrumental value of {XAI}},
  doi          = {10.1007/s43681-024-00622-3},
  volume       = {5},
  year         = {2025},
}

@incollection{62305,
  author       = {Reijers, Wessel and Matzner, Tobias and Alpsancar, Suzana},
  booktitle    = {Digital Development. Technology, Ethics and Governance},
  editor       = {Farina, Mirko and Yu, Xiao and Chen, Jin},
  isbn         = {9781003567622},
  publisher    = {Routledge},
  title        = {Explainability and {AI} Governance},
  doi          = {10.4324/9781003567622-22},
  year         = {2025},
}

@book{51764,
  editor       = {Adelmann, Ralf and Matzner, Tobias and Miggelbrink, Monique and Schulz, Christian},
  publisher    = {MediaRep},
  title        = {Filter – Medienwissenschaftliche Symposien der {DFG}},
  year         = {2024},
}

@inproceedings{57172,
  author       = {Reijers, Wessel and Matzner, Tobias and Alpsancar, Suzana and Philippi, Martina},
  booktitle    = {Smart Ethics in the Digital World: Proceedings of the {ETHICOMP} 2024. 21st International Conference on the Ethical and Social Impacts of {ICT}},
  publisher    = {Universidad de La Rioja},
  title        = {{AI} explainability, temporality, and civic virtue},
  year         = {2024},
}

@inproceedings{51752,
  author       = {Finke, Josefine and Horwath, Ilona and Matzner, Tobias and Schulz, Christian},
  booktitle    = {Artificial Intelligence in {HCI}},
  pages        = {149--160},
  publisher    = {Springer International Publishing},
  title        = {{(De)Coding} social practice in the field of {XAI}: Towards a co-constructive framework of explanations and understanding between lay users and algorithmic systems},
  doi          = {10.1007/978-3-031-05643-7_10},
  internal-note = {review: duplicate of entry 39639 (same DOI, pages, venue); keep only one key},
  year         = {2022},
}

@article{42886,
  author       = {Matzner, Tobias},
  journal      = {New Media \& Society},
  title        = {Algorithms as complementary abstractions},
  note         = {Published online first 2022},
  doi          = {10.1177/14614448221078604},
  year         = {2022},
}

@book{42885,
  author       = {Matzner, Tobias and Robaszkiewicz, Maria},
  publisher    = {Springer},
  title        = {Hannah Arendt: Challenges of Plurality},
  internal-note = {review: likely duplicate of entry 50232, where the same names appear as editors with year 2021 — confirm role and year, then drop one entry},
  year         = {2022},
}

@inproceedings{39639,
  author       = {Finke, Josefine and Horwath, Ilona and Matzner, Tobias and Schulz, Christian},
  booktitle    = {Artificial Intelligence in {HCI}},
  pages        = {149--160},
  publisher    = {Springer International Publishing},
  title        = {{(De)Coding} Social Practice in the Field of {XAI}: Towards a Co-constructive Framework of Explanations and Understanding Between Lay Users and Algorithmic Systems},
  doi          = {10.1007/978-3-031-05643-7_10},
  internal-note = {review: duplicate of entry 51752 (same DOI, pages, venue); keep only one key},
  year         = {2022},
}

@article{24456,
  abstract     = {One objective of current research in explainable intelligent systems is to implement social aspects in order to increase the relevance of explanations. In this paper, we argue that a novel conceptual framework is needed to overcome shortcomings of existing AI systems with little attention to processes of interaction and learning. Drawing from research in interaction and development, we first outline the novel conceptual framework that pushes the design of AI systems toward true interactivity with an emphasis on the role of the partner and social relevance. We propose that AI systems will be able to provide a meaningful and relevant explanation only if the process of explaining is extended to active contribution of both partners that brings about dynamics that is modulated by different levels of analysis. Accordingly, our conceptual framework comprises monitoring and scaffolding as key concepts and claims that the process of explaining is not only modulated by the interaction between explainee and explainer but is embedded into a larger social context in which conventionalized and routinized behaviors are established. We discuss our conceptual framework in relation to the established objectives of transparency and autonomy that are raised for the design of explainable AI systems currently.},
  author       = {Rohlfing, Katharina J. and Cimiano, Philipp and Scharlau, Ingrid and Matzner, Tobias and Buhl, Heike M. and Buschmeier, Hendrik and Esposito, Elena and Grimminger, Angela and Hammer, Barbara and Haeb-Umbach, Reinhold and Horwath, Ilona and Hüllermeier, Eyke and Kern, Friederike and Kopp, Stefan and Thommes, Kirsten and Ngonga Ngomo, Axel-Cyrille and Schulte, Carsten and Wachsmuth, Henning and Wagner, Petra and Wrede, Britta},
  issn         = {2379-8920},
  journal      = {IEEE Transactions on Cognitive and Developmental Systems},
  keywords     = {Explainability, process of explaining and understanding, explainable artificial systems},
  number       = {3},
  pages        = {717--728},
  title        = {Explanation as a Social Practice: Toward a Conceptual Framework for the Social Design of {AI} Systems},
  doi          = {10.1109/tcds.2020.3044366},
  volume       = {13},
  year         = {2021},
}

@book{50232,
  editor       = {Robaszkiewicz, Maria Anna and Matzner, Tobias},
  isbn         = {9783030817114},
  issn         = {2523-8760},
  publisher    = {Springer International Publishing},
  title        = {Hannah Arendt: Challenges of Plurality},
  doi          = {10.1007/978-3-030-81712-1},
  year         = {2021},
}

@techreport{51758,
  author       = {Bush, Annika and de Gruisbourne, Birte and Matzner, Tobias and Schulz, Christian},
  title        = {Data Literacy: Kompetenzrahmen für Hochschulen},
  internal-note = {review: @techreport requires an institution field — add the issuing institution},
  year         = {2021},
}

@incollection{42904,
  author       = {Matzner, Tobias},
  booktitle    = {Autonome Autos – Die Zukunft des Verkehrs und die Dispositive der Mobilität},
  editor       = {Sprenger, Florian},
  publisher    = {transcript},
  title        = {Die Fussgänger*innen autonomer Kraftfahrzeuge. Eine informatische Dispositivanalyse},
  year         = {2021},
}

@incollection{28181,
  author       = {de Gruisbourne, Birte and Matzner, Tobias},
  booktitle    = {Hochschule auf Abstand. Ein multiperspektivischer Zugang zur digitalen Lehre},
  editor       = {Trier, Ulrike and Schaper, Niclas and Vöing, Nerea and Osthushenrich, Judith and Neiske, Iris},
  pages        = {187--200},
  publisher    = {transcript},
  title        = {Herausforderungen digitaler Lehre. Eine Perspektive der Care-Ethik},
  doi          = {10.14361/9783839456903},
  year         = {2021},
}

@misc{42929,
  author       = {Matzner, Tobias and Heesen, Jessica and Grunwald, Armin and Roßnagel, Alexander},
  howpublished = {Plattform Lernende Systeme},
  title        = {Ethik-Briefing. Leitfaden für eine verantwortungsvolle Entwicklung und Anwendung von {KI}-Systemen},
  year         = {2020},
}

@misc{42931,
  author       = {Matzner, Tobias},
  howpublished = {Medium.com},
  title        = {Digital ist anders — Über eine Universität ohne Präsenzlehre},
  year         = {2020},
}

@incollection{42905,
  author       = {Matzner, Tobias},
  booktitle    = {Die dunklen Seiten des Konsums – Alte Probleme, neue Herausforderungen},
  editor       = {Heidbrink, Ludger and Gröppel-Klein, Andrea},
  publisher    = {Nomos},
  title        = {Mediensucht? Über die Medikalisierung der Mediennutzung in Suchtdebatten},
  year         = {2020},
}

@article{28168,
  author       = {Schulz, Christian and Matzner, Tobias},
  journal      = {Navigationen},
  number       = {2},
  title        = {Feed the Interface – Social-Media-Feeds und filternde Schwellen},
  volume       = {20},
  year         = {2020},
}

@techreport{42912,
  author       = {Bohnenkamp, Björn and Burkhardt, Marcus and Grashöfer, Katja and Hlukhovych, Adrianna and Krewani, Angela and Matzner, Tobias and Missomelius, Petra and Shnayien, Marie-Luise},
  title        = {Online-Lehre 2020 – Eine medienwissenschaftliche Perspektive. Diskussionspapier Nr. 10},
  doi          = {10.25969/mediarep/14846},
  internal-note = {review: @techreport requires an institution field — add the issuing institution},
  year         = {2020},
}

@misc{42937,
  author       = {Matzner, Tobias and Beck, Susanne and Grunwald, Armin and Jacob, Kai},
  howpublished = {Plattform Lernende Systeme},
  title        = {Whitepaper Künstliche Intelligenz und Diskriminierung – Herausforderungen und Lösungsansätze},
  year         = {2019},
}

