@article{62028,
  abstract     = {{Explainable AI (XAI) methods can support the identification of biases in automated decision-making (ADM) systems. However, existing research does not sufficiently address whether these biases originate from the ADM system or mirror underlying societal inequalities. This distinction is important because it has major implications for how to act upon an explanation: while the socio-technical bias produced by the ADM system can be algorithmically fixed, societal inequalities demand societal actions. To address this gap, we propose the RR-XAI framework (recognition-redistribution through XAI), which builds on a distinction between socio-technical and societal bias and on Nancy Fraser's justice theory of recognition and redistribution. In our framework, explanations can play two distinct roles: as a socio-technical diagnosis when they reveal biases produced by the ADM system itself, or as a societal diagnosis when they expose biases that reflect broader societal inequalities. We then outline the operationalization of the framework and discuss its applicability to cases in algorithmic hiring and credit scoring. Based on our findings, we argue that the diagnostic functions of XAI are contingent on the provision of such explanations, the resources of the audiences, and the current limits of XAI techniques.}},
  author       = {{Fahimi, Miriam and State, Laura and Kasirzadeh, Atoosa}},
  issn         = {{3065-8365}},
  journal      = {{Proceedings of the AAAI/ACM Conference on AI, Ethics, and Society}},
  number       = {{1}},
  pages        = {{879--892}},
  publisher    = {{Association for the Advancement of Artificial Intelligence (AAAI)}},
  title        = {{{From Explaining to Diagnosing: A Justice-Oriented Framework of Explainable AI for Bias Detection}}},
  doi          = {{10.1609/aies.v8i1.36597}},
  volume       = {{8}},
  year         = {{2025}},
}

@inproceedings{62229,
  abstract     = {{In 2024, the EU adopted the AI Act, a new set of rules for trustworthy artificial intelligence. This legal instrument carves out a large place for standardisation, a regulatory technique that consists in crafting so-called harmonised technical standards to facilitate legal compliance by industry stakeholders. While EU technical standards have been used in the past to ensure product safety, for the first time the AI Act relies on standardisation to facilitate compliance with fundamental rights, including the right to non-discrimination and equality. The attempt to translate inherently open-textured rights and ethical principles into operationalisable standards raises critical questions. In particular, how will standardisation practices under the new EU AI Act affect, transform, contest, and stabilise notions of equality and non-discrimination in an increasingly algorithmic society? This paper proposes a research agenda to address this question and unpack the black box of AI standardisation.}},
  author       = {{Xenidis, Raphaële and Fahimi, Miriam}},
  booktitle    = {{Proceedings of the Fourth European Workshop on Algorithmic Fairness}},
  pages        = {{310--314}},
  publisher    = {{PMLR}},
  title        = {{{Standardising Equality in the Algorithmic Society? A Research Agenda}}},
  year         = {{2025}},
}

@article{62029,
  author       = {{Fahimi, Miriam and Kinder-Kurlanda, Katharina}},
  issn         = {{2364-2114}},
  journal      = {{Digital Culture \& Society}},
  number       = {{2}},
  pages        = {{141--160}},
  publisher    = {{Transcript Verlag}},
  title        = {{{Friction in the Materialities of Value: Relating Transparency, Algorithms and Credit Scoring}}},
  doi          = {{10.14361/dcs-2023-0208}},
  volume       = {{9}},
  year         = {{2023}},
}

@article{62033,
  abstract     = {{The literature addressing bias and fairness in AI models (\emph{fair-AI}) is growing at a fast pace, making it difficult for novel researchers and practitioners to gain a bird's-eye view of the field. In particular, many policy initiatives, standards, and best practices in fair-AI have been proposed for setting principles, procedures, and knowledge bases to guide and operationalize the management of bias and fairness. The first objective of this paper is to concisely survey the state of the art of fair-AI methods and resources, and the main policies on bias in AI, with the aim of providing such bird's-eye guidance for both researchers and practitioners. The second objective of the paper is to contribute to the state of the art in policy advice and best practices by leveraging the results of the NoBIAS research project. We present and discuss a few relevant topics organized around the NoBIAS architecture, which is made up of a Legal Layer, focusing on the European Union context, and a Bias Management Layer, focusing on understanding, mitigating, and accounting for bias.}},
  author       = {{Alvarez, Jose M. and Colmenarejo, Alejandra Bringas and Elobaid, Alaa and Fabbrizzi, Simone and Fahimi, Miriam and Ferrara, Antonio and Ghodsi, Siamak and Mougan, Carlos and Papageorgiou, Ioanna and Reyero, Paula and Russo, Mayra and Scott, Kristen M. and State, Laura and Zhao, Xuan and Ruggieri, Salvatore}},
  issn         = {{1388-1957}},
  journal      = {{Ethics and Information Technology}},
  number       = {{2}},
  publisher    = {{Springer Science and Business Media LLC}},
  title        = {{{Policy advice and best practices on bias and fairness in AI}}},
  doi          = {{10.1007/s10676-024-09746-w}},
  volume       = {{26}},
  year         = {{2024}},
}

@article{62031,
  abstract     = {{The field of fair AI aims to counter biased algorithms through computational modelling. However, it faces increasing criticism for perpetuating the use of overly technical and reductionist methods. As a result, novel approaches appear in the field to address more socially oriented and interdisciplinary (SOI) perspectives on fair AI. In this paper, we take this dynamic as the starting point to study the tension between computer science (CS) and SOI research. By drawing on STS and CSCW theory, we position fair AI research as a matter of 'organizational alignment': what makes research 'doable' is the successful alignment of three levels of work organization (the social world, the laboratory, and the experiment). Based on qualitative interviews with CS researchers, we analyze the tasks, resources, and actors required for doable research in the case of fair AI. We find that CS researchers engage with SOI research to some extent, but organizational conditions, articulation work, and ambiguities of the social world constrain the doability of SOI research for them. Based on our findings, we identify and discuss problems for aligning CS and SOI as fair AI continues to evolve.}},
  author       = {{Fahimi, Miriam and Russo, Mayra and Scott, Kristen M. and Vidal, Maria-Esther and Berendt, Bettina and Kinder-Kurlanda, Katharina}},
  issn         = {{2573-0142}},
  journal      = {{Proceedings of the ACM on Human-Computer Interaction}},
  number       = {{CSCW2}},
  pages        = {{1--23}},
  publisher    = {{Association for Computing Machinery (ACM)}},
  title        = {{{Articulation Work and Tinkering for Fairness in Machine Learning}}},
  doi          = {{10.1145/3686973}},
  volume       = {{8}},
  year         = {{2024}},
}

@inbook{62228,
  abstract     = {{This chapter highlights the intricate nature of data and their profound social implications. It examines the acts of rendering data visible and the inherent power dynamics and imbalances that accompany such processes. Our dialogue unfolds in three interconnected parts, each focusing on the intersection of in/visibility and power. Part 1 attends to the challenges of producing knowledge about and with data, emphasizing the relativity, fluidity, and instability inherent in data. It explores frameworks that uncover the often invisible infrastructures of algorithms, rendering visible the actors, technologies, and divergent values involved in data manipulation. Part 2 presents empirical case studies that analyse the consequences of data visibility while contemplating the methodological opportunities and challenges of foregrounding the embedded values and norms within data. Part 3 discusses tool-based interventions aimed at bringing alternative data framings and narratives to the fore. It examines the complexities of tracing data across various contexts and the value, utility, and obstacles associated with creating visual representations of data and their flows. By critically engaging with the complexities of data in/visibility, this chapter challenges existing gatekeepers and fosters a deeper understanding of the multifaceted nature of data and its socio-political ramifications.}},
  author       = {{Fahimi, Miriam and Falk, Petter and Gray, Jonathan W. Y. and Jarke, Juliane and Kinder-Kurlanda, Katharina and Light, Evan and McGeachey, Ellouise and Medina Perea, Itzelle and Poechhacker, Nikolaus and Poirier, Lindsay and Röhle, Theo and Sharon, Tamar and Stevens, Marthe and van Gastel, Bernard and White, Quinn and Zakharova, Irina}},
  booktitle    = {{Dialogues in Data Power}},
  isbn         = {{978-1-5292-3832-7}},
  pages        = {{52--79}},
  publisher    = {{Bristol University Press}},
  title        = {{{In/visibilities in Data Studies: Methods, Tools, and Interventions}}},
  year         = {{2024}},
}

@inbook{62230,
  abstract     = {{Algorithms have risen to become one of the central technologies, if not the central technology, for producing, circulating, and evaluating knowledge in multiple societal arenas. In this book, scholars from the social sciences, humanities, and computer science argue that this shift has, and will continue to have, profound implications for how knowledge is produced and what and whose knowledge is valued and deemed valid. To attend to this fundamental change, the authors propose the concept of algorithmic regimes and demonstrate how they transform the epistemological, methodological, and political foundations of knowledge production, sensemaking, and decision-making in contemporary societies. Across sixteen chapters, the volume offers a diverse collection of contributions along three perspectives on algorithmic regimes: the methods necessary to research and design algorithmic regimes, the ways in which algorithmic regimes reconfigure sociotechnical interactions, and the politics ingrained in algorithmic regimes.}},
  author       = {{Kinder-Kurlanda, Katharina and Fahimi, Miriam}},
  booktitle    = {{Algorithmic Regimes: Methods, Interactions, and Politics}},
  editor       = {{Jarke, Juliane and Prietl, Bianca and Egbert, Simon and Boeva, Yana and Heuer, Hendrik and Arnold, Maike}},
  isbn         = {{978-94-6372-848-5}},
  pages        = {{309--330}},
  publisher    = {{Amsterdam University Press}},
  title        = {{{Making Algorithms Fair: Ethnographic Insights from Machine Learning Interventions}}},
  year         = {{2024}},
}

@inproceedings{62231,
  abstract     = {{Explainable artificial intelligence (XAI) is a rapidly growing research field that has received considerable attention in recent years. An important goal of the field is to use its methods to detect (social) bias and discrimination. Despite these positive intentions, aspects of XAI can be in conflict with feminist approaches and values. Therefore, our conceptual contribution brings forward both a careful assessment of current XAI methods and visions for carefully doing XAI from a feminist perspective. We conclude with a discussion of the possibilities for caring XAI and the challenges that might lie along the way.}},
  author       = {{State, Laura and Fahimi, Miriam}},
  booktitle    = {{Proceedings of the 2nd European Workshop on Algorithmic Fairness}},
  publisher    = {{CEUR Workshop Proceedings}},
  title        = {{{Careful Explanations: A Feminist Perspective on XAI}}},
  year         = {{2023}},
}

@book{62032,
  editor       = {{Fahimi, Miriam and Flatschart, Elmar and Schaffar, Wolfram}},
  isbn         = {{978-3-030-93999-1}},
  publisher    = {{Springer International Publishing}},
  title        = {{{State and Statehood in the Global South}}},
  doi          = {{10.1007/978-3-030-94000-3}},
  year         = {{2022}},
}

