@article{24456,
  abstract     = {{One objective of current research in explainable intelligent systems is to implement social aspects in order to increase the relevance of explanations. In this paper, we argue that a novel conceptual framework is needed to overcome shortcomings of existing AI systems with little attention to processes of interaction and learning. Drawing from research in interaction and development, we first outline the novel conceptual framework that pushes the design of AI systems toward true interactivity with an emphasis on the role of the partner and social relevance. We propose that AI systems will be able to provide a meaningful and relevant explanation only if the process of explaining is extended to active contribution of both partners that brings about dynamics that is modulated by different levels of analysis. Accordingly, our conceptual framework comprises monitoring and scaffolding as key concepts and claims that the process of explaining is not only modulated by the interaction between explainee and explainer but is embedded into a larger social context in which conventionalized and routinized behaviors are established. We discuss our conceptual framework in relation to the established objectives of transparency and autonomy that are raised for the design of explainable AI systems currently.}},
  author       = {{Rohlfing, Katharina J. and Cimiano, Philipp and Scharlau, Ingrid and Matzner, Tobias and Buhl, Heike M. and Buschmeier, Hendrik and Esposito, Elena and Grimminger, Angela and Hammer, Barbara and Haeb-Umbach, Reinhold and Horwath, Ilona and Hüllermeier, Eyke and Kern, Friederike and Kopp, Stefan and Thommes, Kirsten and Ngonga Ngomo, Axel-Cyrille and Schulte, Carsten and Wachsmuth, Henning and Wagner, Petra and Wrede, Britta}},
  issn         = {{2379-8920}},
  journal      = {{IEEE Transactions on Cognitive and Developmental Systems}},
  keywords     = {{Explainability, process of explaining and understanding, explainable artificial systems}},
  number       = {{3}},
  pages        = {{717--728}},
  title        = {{{Explanation as a Social Practice: Toward a Conceptual Framework for the Social Design of AI Systems}}},
  doi          = {{10.1109/tcds.2020.3044366}},
  volume       = {{13}},
  year         = {{2021}},
}

@techreport{51758,
  author       = {{Bush, Annika and de Gruisbourne, Birte and Matzner, Tobias and Schulz, Christian}},
  title        = {{{Data Literacy: Kompetenzrahmen für Hochschulen}}},
  year         = {{2021}},
}

@inbook{42904,
  author       = {{Matzner, Tobias}},
  booktitle    = {{Autonome Autos – Die Zukunft des Verkehrs und die Dispositive der Mobilität}},
  editor       = {{Sprenger, Florian}},
  publisher    = {{Transcript}},
  title        = {{{Die Fussgänger*innen autonomer Kraftfahrzeuge. Eine informatische Dispositivanalyse}}},
  year         = {{2021}},
}

@book{47618,
  editor       = {{Althoff, Sebastian and Linseisen, Elisa and Müller, Maja-Lisa and Winter, Franziska}},
  isbn         = {{978-3-7705-6495-8}},
  publisher    = {{Wilhelm Fink}},
  title        = {{{Re/Dissolving Mimesis}}},
  year         = {{2020}},
}

@inbook{47623,
  author       = {{Althoff, Sebastian}},
  booktitle    = {{Re/Dissolving Mimesis}},
  editor       = {{Linseisen, Elisa and Müller, Maja-Lisa and Winter, Franziska}},
  publisher    = {{Wilhelm Fink}},
  title        = {{{A CCTV Image that Dissolves like Smeared Data: Distinguishability versus Similarity}}},
  doi          = {{10.30965/9783846764954_012}},
  year         = {{2020}},
}

@inbook{47622,
  author       = {{Althoff, Sebastian and Linseisen, Elisa and Müller, Maja-Lisa and Winter, Franziska}},
  booktitle    = {{Re/Dissolving Mimesis}},
  editor       = {{Linseisen, Elisa and Müller, Maja-Lisa and Winter, Franziska}},
  publisher    = {{Wilhelm Fink}},
  title        = {{{Editorial: Re/Dissolving Mimesis}}},
  doi          = {{10.30965/9783846764954_004}},
  year         = {{2020}},
}

@article{47624,
  author       = {{Althoff, Sebastian}},
  journal      = {{Performance Research}},
  number       = {{7}},
  pages        = {{92--98}},
  title        = {{{Seeping Out: The diminishment of the subject in Hito Steyerl’s How Not to Be Seen}}},
  doi          = {{10.1080/13528165.2019.1717871}},
  volume       = {{24}},
  year         = {{2020}},
}

@article{51753,
  author       = {{Schulz, Christian and Matzner, Tobias}},
  journal      = {{Navigationen – Zeitschrift für Medien- und Kulturwissenschaften}},
  pages        = {{147--164}},
  publisher    = {{Universi Verlag}},
  title        = {{{Feed the Interface – Social-Media-Feeds als Schwellen}}},
  doi          = {{10.25819/ubsi/5595}},
  volume       = {{2}},
  year         = {{2020}},
}

@article{51754,
  author       = {{Schulz, Christian and Bäcker, Theresia and Kathöfer, Jasmin}},
  journal      = {{Navigationen – Zeitschrift für Medien- und Kulturwissenschaften}},
  pages        = {{7--22}},
  publisher    = {{Universi Verlag}},
  title        = {{{»The Filter is the Message« oder der entwendete Begriff – Zur Einführung}}},
  volume       = {{2}},
  year         = {{2020}},
}

@misc{51763,
  booktitle    = {{Navigationen – Zeitschrift für Medien- und Kulturwissenschaften}},
  editor       = {{Bäcker, Theresia and Kathöfer, Jasmin and Schulz, Christian}},
  pages        = {{200}},
  publisher    = {{Universi Verlag}},
  title        = {{{Filter(n) – Geschichte, Ästhetik, Praktiken}}},
  doi          = {{10.25819/ubsi/5595}},
  volume       = {{2}},
  year         = {{2020}},
}

@misc{42929,
  author       = {{Matzner, Tobias and Heesen, Jessica and Grunwald, Armin and Roßnagel, Alexander}},
  booktitle    = {{der Plattform Lernende Systeme}},
  title        = {{{Ethik-Briefing. Leitfaden für eine verantwortungsvolle Entwicklung und Anwendung von KI-Systemen}}},
  year         = {{2020}},
}

@misc{42931,
  author       = {{Matzner, Tobias}},
  booktitle    = {{Medium.com}},
  title        = {{{Digital ist anders — Über eine Universität ohne Präsenzlehre}}},
  year         = {{2020}},
}

@inbook{42905,
  author       = {{Matzner, Tobias}},
  booktitle    = {{Die dunklen Seiten des Konsums – Alte Probleme, neue Herausforderungen}},
  editor       = {{Heidbrink, Ludger and Gröppel-Klein, Andrea}},
  publisher    = {{Nomos}},
  title        = {{{Mediensucht? Über die Medikalisierung der Mediennutzung in Suchtdebatten}}},
  year         = {{2020}},
}

@article{28168,
  author       = {{Schulz, Christian and Matzner, Tobias}},
  journal      = {{Navigationen}},
  number       = {{2}},
  title        = {{{Feed the Interface – Social-Media-Feeds und filternde Schwellen}}},
  internal-note = {{possible duplicate of entry 51753 (same article, differing title/volume data) -- verify and merge}},
  volume       = {{20}},
  year         = {{2020}},
}

@article{51755,
  author       = {{Schulz, Christian}},
  journal      = {{Medienobservationen}},
  pages        = {{1--16}},
  title        = {{{Mobile Foto-Filter – Von technischer Entstörung und bildästhetischer Aufwertung}}},
  year         = {{2019}},
}

@misc{42937,
  author       = {{Matzner, Tobias and Beck, Susanne and Grunwald, Armin and Jacob, Kai}},
  booktitle    = {{der Plattform Lernende Systeme}},
  title        = {{{Whitepaper Künstliche Intelligenz und Diskriminierung – Herausforderungen und Lösungsansätze}}},
  year         = {{2019}},
}

@article{42930,
  author       = {{Matzner, Tobias}},
  journal      = {{Gehirn \& Geist}},
  number       = {{1}},
  title        = {{{Sinnvoll versus gesund? – Bestimmungen der Mediennutzung}}},
  year         = {{2019}},
}

@inbook{42906,
  author       = {{Matzner, Tobias}},
  booktitle    = {{The Democratization of Artificial Intelligence – Net Politics in the Era of Learning Algorithms}},
  editor       = {{Sudmann, Andreas}},
  publisher    = {{transcript}},
  title        = {{{Plural, situated subjects in the critique of artificial intelligence}}},
  year         = {{2019}},
}

@article{28148,
  abstract     = {{The potential for biases being built into algorithms has been known for some time (e.g., Friedman and Nissenbaum, 1996), yet literature has only recently demonstrated the ways algorithmic profiling can result in social sorting and harm marginalised groups (e.g., Browne, 2015; Eubanks, 2018; Noble, 2018). We contend that with increased algorithmic complexity, biases will become more sophisticated and difficult to identify, control for, or contest. Our argument has four steps: first, we show how harnessing algorithms means that data gathered at a particular place and time relating to specific persons, can be used to build group models applied in different contexts to different persons. Thus, privacy and data protection rights, with their focus on individuals (Coll, 2014; Parsons, 2015), do not protect from the discriminatory potential of algorithmic profiling. Second, we explore the idea that anti-discrimination regulation may be more promising, but acknowledge limitations. Third, we argue that in order to harness anti-discrimination regulation, it needs to confront emergent forms of discrimination or risk creating new invisibilities, including invisibility from existing safeguards. Finally, we outline suggestions to address emergent forms of discrimination and exclusionary invisibilities via intersectional and post-colonial analysis.}},
  author       = {{Mann, Monique and Matzner, Tobias}},
  issn         = {{2053-9517}},
  journal      = {{Big Data \& Society}},
  number       = {{2}},
  title        = {{{Challenging algorithmic profiling: The limits of data protection and anti-discrimination in responding to emergent discrimination}}},
  doi          = {{10.1177/2053951719895805}},
  volume       = {{6}},
  year         = {{2019}},
}

@article{28169,
  author       = {{Matzner, Tobias}},
  journal      = {{Zeitschrift für Medienwissenschaft}},
  pages        = {{46--55}},
  title        = {{{Autonome Trolleys und andere Probleme. Konfigurationen Künstlicher Intelligenz in ethischen Debatten über selbstfahrende Kraftfahrzeuge}}},
  volume       = {{21}},
  year         = {{2019}},
}

