[{"language":[{"iso":"eng"}],"department":[{"_id":"26"},{"_id":"756"}],"user_id":"93637","_id":"65061","project":[{"_id":"109","name":"TRR 318: Erklärbarkeit konstruieren"},{"name":"TRR 318; TP B06: Ethik und Normativität der erklärbaren KI","_id":"370"}],"status":"public","abstract":[{"lang":"eng","text":"<jats:title>Abstract</jats:title>\r\n                  <jats:p>\r\n                    One of the purposes for which XAI is often brought into play is to enable a user to act responsibly. However, responsibility is a complex normative and social phenomenon that we unfold in this chapter. We consider that the classical concepts of agency and responsibility do not fully capture what is needed for meaningful collaboration between human users and XAI. Advocating the perspective of sXAI, we argue that the growing adaptivity of AI systems will result in sXAI being considered as partners. Both partners adopt particular (dialogical) roles within a collaborative process and take responsibility for them. We expect that these roles lead to reactive attitudes toward the sXAI on the side of the human partners that make these roles relational. They resemble those reactive attitudes that we hold toward other human agents. For agents to exercise their responsibility, they need to possess agential capacities to fulfill their role with respect to the structure of a social interaction. Hence, sXAI can be expected to act responsibly. But because of XAI’s limited normative capacities, it might rather act as a marginal agent. We refer to marginal agents and show they can be scaffolded with regard to their agential capacities and their knowledge about the structure of a social interaction. The structure links the actions of the partners to each other in terms of a set of stimuli and responses to it in pursuit of a particular goal. Hence, it is important to differentiate between the different goals that a structure can impose for exercising responsibility. 
Therefore, we follow (Responsibility from the margins. Oxford University Press; 2015.\r\n                    <jats:ext-link xmlns:xlink=\"http://www.w3.org/1999/xlink\" xlink:href=\"https://doi.org/10.1093/acprof:oso/9780198715672.24001.0001\" ext-link-type=\"uri\">https://doi.org/10.1093/acprof:oso/9780198715672.24001.0001</jats:ext-link>\r\n                    ) and offer three structures that can help to organize responsibility for\r\n                    <jats:italic>decisions made</jats:italic>\r\n                    with the assistance of AI systems. These structures are attributability, answerability, and accountability. Our insights will inform the development and design process of XAI to meet the guiding principles of responsible research and innovation as well as trustworthy AI.\r\n                  </jats:p>"}],"publication":"Social Explainable AI","type":"book_chapter","doi":"10.1007/978-981-96-5290-7_9","main_file_link":[{"open_access":"1","url":"https://doi.org/10.1007/978-981-96-5290-7_9"}],"title":"Responsibilities in sXAI","author":[{"first_name":"Katharina J.","last_name":"Rohlfing","orcid":"0000-0002-5676-8233","id":"50352","full_name":"Rohlfing, Katharina J."},{"first_name":"Suzana","last_name":"Alpsancar","id":"93637","full_name":"Alpsancar, Suzana"},{"first_name":"Carsten","last_name":"Schulte","id":"60311","full_name":"Schulte, Carsten"}],"date_created":"2026-03-19T10:59:18Z","date_updated":"2026-03-19T11:53:01Z","oa":"1","publisher":"Springer Nature Singapore","page":"157-177","citation":{"ieee":"K. J. Rohlfing, S. Alpsancar, and C. Schulte, “Responsibilities in sXAI,” in <i>Social Explainable AI</i>, Singapore: Springer Nature Singapore, 2026, pp. 157–177.","chicago":"Rohlfing, Katharina J., Suzana Alpsancar, and Carsten Schulte. “Responsibilities in SXAI.” In <i>Social Explainable AI</i>, 157–77. Singapore: Springer Nature Singapore, 2026. 
<a href=\"https://doi.org/10.1007/978-981-96-5290-7_9\">https://doi.org/10.1007/978-981-96-5290-7_9</a>.","ama":"Rohlfing KJ, Alpsancar S, Schulte C. Responsibilities in sXAI. In: <i>Social Explainable AI</i>. Springer Nature Singapore; 2026:157-177. doi:<a href=\"https://doi.org/10.1007/978-981-96-5290-7_9\">10.1007/978-981-96-5290-7_9</a>","apa":"Rohlfing, K. J., Alpsancar, S., &#38; Schulte, C. (2026). Responsibilities in sXAI. In <i>Social Explainable AI</i> (pp. 157–177). Springer Nature Singapore. <a href=\"https://doi.org/10.1007/978-981-96-5290-7_9\">https://doi.org/10.1007/978-981-96-5290-7_9</a>","mla":"Rohlfing, Katharina J., et al. “Responsibilities in SXAI.” <i>Social Explainable AI</i>, Springer Nature Singapore, 2026, pp. 157–77, doi:<a href=\"https://doi.org/10.1007/978-981-96-5290-7_9\">10.1007/978-981-96-5290-7_9</a>.","bibtex":"@inbook{Rohlfing_Alpsancar_Schulte_2026, place={Singapore}, title={Responsibilities in sXAI}, DOI={<a href=\"https://doi.org/10.1007/978-981-96-5290-7_9\">10.1007/978-981-96-5290-7_9</a>}, booktitle={Social Explainable AI}, publisher={Springer Nature Singapore}, author={Rohlfing, Katharina J. and Alpsancar, Suzana and Schulte, Carsten}, year={2026}, pages={157–177} }","short":"K.J. Rohlfing, S. Alpsancar, C. Schulte, in: Social Explainable AI, Springer Nature Singapore, Singapore, 2026, pp. 157–177."},"year":"2026","place":"Singapore","publication_identifier":{"isbn":["9789819652891","9789819652907"]},"publication_status":"published"},{"title":"Tasking AI Fairly. How to Empower AI Practitioners With sXAI?","date_created":"2026-03-19T11:03:30Z","publisher":"Springer Nature Singapore","year":"2026","language":[{"iso":"eng"}],"abstract":[{"lang":"eng","text":"<jats:title>Abstract</jats:title>\r\n                  <jats:p>\r\n                    This chapter critically examines how social explainable AI (sXAI) can better support AI practitioners in ensuring fairness in AI-based decision-making. 
We argue for a fundamental shift: Fairness should be understood not as a technical property or an information problem, but as a matter of vulnerability—focusing on the real-world impacts of AI on individuals and groups, especially those most at risk. Hereby, we call for a shift in perspective: from fair AI to\r\n                    <jats:italic>tasking AI fairly</jats:italic>\r\n                    . To motivate our vulnerability approach, we review the “Dutch welfare fraud scandal” (system risk indication—SyRI) and current challenges in the field of fair AI/machine learning (ML). Vulnerability of a person or members of a definable group of persons is a complex relational notion, and not a technical property of a technical system. Accordingly, we suggest several nontechnical strategies that hold the promise to compensate for the insufficiency of purely technical approaches to fairness and other ethical issues in the practical use of AI-based systems. To discuss how sXAI, due to its interactive and adaptive social character, might better fulfill this role than current XAI techniques, we provide a toy scenario for how sXAI might support the virtuous AI practitioner in an ethical inquiry. Finally, we also address challenges and limits of our approach.\r\n                  </jats:p>"}],"publication":"Social Explainable AI","doi":"10.1007/978-981-96-5290-7_29","main_file_link":[{"url":"https://doi.org/10.1007/978-981-96-5290-7_29","open_access":"1"}],"author":[{"first_name":"Suzana","full_name":"Alpsancar, Suzana","id":"93637","last_name":"Alpsancar"},{"first_name":"Eugenia","last_name":"Stamboliev","full_name":"Stamboliev, Eugenia"}],"date_updated":"2026-03-19T11:53:42Z","oa":"1","page":"557-581","citation":{"ieee":"S. Alpsancar and E. Stamboliev, “Tasking AI Fairly. How to Empower AI Practitioners With sXAI?,” in <i>Social Explainable AI</i>, Singapore: Springer Nature Singapore, 2026, pp. 557–581.","chicago":"Alpsancar, Suzana, and Eugenia Stamboliev. 
“Tasking AI Fairly. How to Empower AI Practitioners With SXAI?” In <i>Social Explainable AI</i>, 557–81. Singapore: Springer Nature Singapore, 2026. <a href=\"https://doi.org/10.1007/978-981-96-5290-7_29\">https://doi.org/10.1007/978-981-96-5290-7_29</a>.","ama":"Alpsancar S, Stamboliev E. Tasking AI Fairly. How to Empower AI Practitioners With sXAI? In: <i>Social Explainable AI</i>. Springer Nature Singapore; 2026:557-581. doi:<a href=\"https://doi.org/10.1007/978-981-96-5290-7_29\">10.1007/978-981-96-5290-7_29</a>","apa":"Alpsancar, S., &#38; Stamboliev, E. (2026). Tasking AI Fairly. How to Empower AI Practitioners With sXAI? In <i>Social Explainable AI</i> (pp. 557–581). Springer Nature Singapore. <a href=\"https://doi.org/10.1007/978-981-96-5290-7_29\">https://doi.org/10.1007/978-981-96-5290-7_29</a>","mla":"Alpsancar, Suzana, and Eugenia Stamboliev. “Tasking AI Fairly. How to Empower AI Practitioners With SXAI?” <i>Social Explainable AI</i>, Springer Nature Singapore, 2026, pp. 557–81, doi:<a href=\"https://doi.org/10.1007/978-981-96-5290-7_29\">10.1007/978-981-96-5290-7_29</a>.","short":"S. Alpsancar, E. Stamboliev, in: Social Explainable AI, Springer Nature Singapore, Singapore, 2026, pp. 557–581.","bibtex":"@inbook{Alpsancar_Stamboliev_2026, place={Singapore}, title={Tasking AI Fairly. 
How to Empower AI Practitioners With sXAI?}, DOI={<a href=\"https://doi.org/10.1007/978-981-96-5290-7_29\">10.1007/978-981-96-5290-7_29</a>}, booktitle={Social Explainable AI}, publisher={Springer Nature Singapore}, author={Alpsancar, Suzana and Stamboliev, Eugenia}, year={2026}, pages={557–581} }"},"place":"Singapore","publication_identifier":{"isbn":["9789819652891","9789819652907"]},"publication_status":"published","department":[{"_id":"26"},{"_id":"756"}],"user_id":"93637","_id":"65063","project":[{"name":"TRR 318; TP B06: Ethik und Normativität der erklärbaren KI","_id":"370"}],"status":"public","type":"book_chapter"},{"date_created":"2026-03-19T11:05:30Z","author":[{"first_name":"Suzana","id":"93637","full_name":"Alpsancar, Suzana","last_name":"Alpsancar"},{"first_name":"Michael","full_name":"Klenk, Michael","last_name":"Klenk"}],"publisher":"Springer Nature Singapore","oa":"1","date_updated":"2026-03-19T11:52:00Z","main_file_link":[{"url":"https://doi.org/10.1007/978-981-96-5290-7_30","open_access":"1"}],"doi":"10.1007/978-981-96-5290-7_30","title":"The Risk of Manipulation and Deception in sXAI","publication_status":"published","publication_identifier":{"isbn":["9789819652891","9789819652907"]},"citation":{"ama":"Alpsancar S, Klenk M. The Risk of Manipulation and Deception in sXAI. In: <i>Social Explainable AI</i>. Springer Nature Singapore; 2026:583-616. doi:<a href=\"https://doi.org/10.1007/978-981-96-5290-7_30\">10.1007/978-981-96-5290-7_30</a>","ieee":"S. Alpsancar and M. Klenk, “The Risk of Manipulation and Deception in sXAI,” in <i>Social Explainable AI</i>, Singapore: Springer Nature Singapore, 2026, pp. 583–616.","chicago":"Alpsancar, Suzana, and Michael Klenk. “The Risk of Manipulation and Deception in SXAI.” In <i>Social Explainable AI</i>, 583–616. Singapore: Springer Nature Singapore, 2026. <a href=\"https://doi.org/10.1007/978-981-96-5290-7_30\">https://doi.org/10.1007/978-981-96-5290-7_30</a>.","mla":"Alpsancar, Suzana, and Michael Klenk. 
“The Risk of Manipulation and Deception in SXAI.” <i>Social Explainable AI</i>, Springer Nature Singapore, 2026, pp. 583–616, doi:<a href=\"https://doi.org/10.1007/978-981-96-5290-7_30\">10.1007/978-981-96-5290-7_30</a>.","short":"S. Alpsancar, M. Klenk, in: Social Explainable AI, Springer Nature Singapore, Singapore, 2026, pp. 583–616.","bibtex":"@inbook{Alpsancar_Klenk_2026, place={Singapore}, title={The Risk of Manipulation and Deception in sXAI}, DOI={<a href=\"https://doi.org/10.1007/978-981-96-5290-7_30\">10.1007/978-981-96-5290-7_30</a>}, booktitle={Social Explainable AI}, publisher={Springer Nature Singapore}, author={Alpsancar, Suzana and Klenk, Michael}, year={2026}, pages={583–616} }","apa":"Alpsancar, S., &#38; Klenk, M. (2026). The Risk of Manipulation and Deception in sXAI. In <i>Social Explainable AI</i> (pp. 583–616). Springer Nature Singapore. <a href=\"https://doi.org/10.1007/978-981-96-5290-7_30\">https://doi.org/10.1007/978-981-96-5290-7_30</a>"},"page":"583-616","place":"Singapore","year":"2026","user_id":"93637","department":[{"_id":"26"},{"_id":"756"}],"project":[{"name":"TRR 318: Erklärbarkeit konstruieren","_id":"109"},{"_id":"370","name":"TRR 318; TP B06: Ethik und Normativität der erklärbaren KI"}],"_id":"65064","language":[{"iso":"eng"}],"type":"book_chapter","publication":"Social Explainable AI","status":"public","abstract":[{"text":"<jats:title>Abstract</jats:title>\r\n                  <jats:p>XAI can minimize the risks of being manipulated and deceived by AI but in turn entails other specific risks. This also applies to sXAI, and the specifically social character of sXAI harbors particular risks that designers and developers should be aware of. In this chapter, we shall discuss the potential opportunities and risks of sXAI. 
We see a particularly positive potential in the social character of sXAI, which lies in the fact that skillful users, including those with “healthy distrust,” can use the adaptivity of sXAI to produce an explanation that is actually relevant and adequate for them. However, this requires a high level of skills on the part of the user and is thus in contrast to the general promise of efficiency in the use of AI. A potential risk of XAI is that it can be (even more) persuasive, as the interactive involvement and the anthropomorphism strengthen a trustworthy appearance/performance (independent of the adequacy of the sXAI performance).</jats:p>","lang":"eng"}]},{"language":[{"iso":"eng"}],"project":[{"name":"TRR 318 - B06: TRR 318 - Teilprojekt B6 - Ethik und Normativität der erklärbaren KI","_id":"370","grant_number":"438445824"}],"_id":"60820","user_id":"100856","status":"public","type":"conference_abstract","title":"Die Ambivalenz von Sichtbarkeit. Ethische Perspektiven auf die digitale Transformation","conference":{"location":"BTU Cottbus-Senftenberg, Cottbus","end_date":"2025-03-21","start_date":"2025-03-20","name":"Sorbische Lebenswelten im digitalen Zeitalter"},"date_updated":"2025-07-29T15:11:57Z","author":[{"first_name":"Martina","id":"100856","full_name":"Philippi, Martina","last_name":"Philippi"}],"date_created":"2025-07-29T14:06:58Z","year":"2025","citation":{"short":"M. Philippi, in: 2025.","bibtex":"@inproceedings{Philippi_2025, title={Die Ambivalenz von Sichtbarkeit. Ethische Perspektiven auf die digitale Transformation}, author={Philippi, Martina}, year={2025} }","mla":"Philippi, Martina. <i>Die Ambivalenz von Sichtbarkeit. Ethische Perspektiven Auf Die Digitale Transformation</i>. 2025.","apa":"Philippi, M. (2025). <i>Die Ambivalenz von Sichtbarkeit. Ethische Perspektiven auf die digitale Transformation</i>. Sorbische Lebenswelten im digitalen Zeitalter, BTU Cottbus-Senftenberg, Cottbus.","chicago":"Philippi, Martina. “Die Ambivalenz von Sichtbarkeit. 
Ethische Perspektiven Auf Die Digitale Transformation,” 2025.","ieee":"M. Philippi, “Die Ambivalenz von Sichtbarkeit. Ethische Perspektiven auf die digitale Transformation,” presented at the Sorbische Lebenswelten im digitalen Zeitalter, BTU Cottbus-Senftenberg, Cottbus, 2025.","ama":"Philippi M. Die Ambivalenz von Sichtbarkeit. Ethische Perspektiven auf die digitale Transformation. In: ; 2025."},"related_material":{"link":[{"relation":"confirmation","url":"https://www.serbski-institut.de/sorbische-lebenswelten-im-digitalen-zeitalter/"}]}},{"language":[{"iso":"eng"}],"user_id":"100856","project":[{"name":"TRR 318 - B06: TRR 318 - Teilprojekt B6 - Ethik und Normativität der erklärbaren KI","_id":"370","grant_number":"438445824"}],"_id":"60821","status":"public","type":"conference_abstract","conference":{"start_date":"2025-01-10","name":"Hermeneutik - oder: Was heißt \"Verstehen\"? Januartagung der Evangelischen Forschungsakademie","location":"Berlin","end_date":"2025-01-12"},"title":"Grenzen des Verstehens","author":[{"first_name":"Martina","last_name":"Philippi","id":"100856","full_name":"Philippi, Martina"}],"date_created":"2025-07-29T14:10:40Z","date_updated":"2025-07-29T15:11:51Z","citation":{"apa":"Philippi, M. (2025). <i>Grenzen des Verstehens</i>. Hermeneutik - oder: Was heißt “Verstehen”? Januartagung der Evangelischen Forschungsakademie, Berlin.","short":"M. Philippi, in: 2025.","mla":"Philippi, Martina. <i>Grenzen Des Verstehens</i>. 2025.","bibtex":"@inproceedings{Philippi_2025, title={Grenzen des Verstehens}, author={Philippi, Martina}, year={2025} }","ieee":"M. Philippi, “Grenzen des Verstehens,” presented at the Hermeneutik - oder: Was heißt “Verstehen”? Januartagung der Evangelischen Forschungsakademie, Berlin, 2025.","chicago":"Philippi, Martina. “Grenzen Des Verstehens,” 2025.","ama":"Philippi M. Grenzen des Verstehens. 
In: ; 2025."},"year":"2025","related_material":{"link":[{"relation":"confirmation","url":"https://yuus.de/tagungen/"}]}},{"status":"public","abstract":[{"text":"<jats:title>Abstract</jats:title>\r\n          <jats:p>It has become a new global trend that governments are partially automating decision-making processes by public agencies. This, however, has led to some scandals revealing grave injustices, including the Robodebt scandal in Australia and the childcare benefit scandal in the Netherlands. This chapter argues that the normative impacts of the move towards automated decision-making can be fruitfully understood and addressed through the lens of civic virtue. It starts by outlining the Dutch childcare benefit scandal, showing that what happened cannot be reduced solely to human intent or structural factors, but needs to address the in-between term of human moral dispositions. Following this insight, the chapter outlines a framework of civic virtue, which outlines ideal states (civic virtue) and their deviations (civic vice) for different temporal configurations (past-, present-, and future-oriented civic virtue). Finally, the chapter uses this framework to reflect on the much-touted principle of ‘explainability’ in addressing harms like the ones done to citizens in the childcare benefit scandal. Three impacts are laid bare through the lens of civic virtue, of servility, presumptuousness, and political recalcitrance. Explainability, the chapter argues, may successfully address these impacts, but only if it ceases to focus solely on narrow black box problems in AI and address public governance more holistically.</jats:p>","lang":"eng"}],"publication":"Public Governance and Emerging Technologies","type":"book_chapter","language":[{"iso":"eng"}],"user_id":"102524","_id":"60234","project":[{"name":"TRR 318; TP B06: Ethik und Normativität der erklärbaren KI","_id":"370"}],"citation":{"ama":"Reijers W. Civic Vice in Digital Governance. 
In: <i>Public Governance and Emerging Technologies</i>. Springer Nature Switzerland; 2025. doi:<a href=\"https://doi.org/10.1007/978-3-031-84748-6_14\">10.1007/978-3-031-84748-6_14</a>","ieee":"W. Reijers, “Civic Vice in Digital Governance,” in <i>Public Governance and Emerging Technologies</i>, Cham: Springer Nature Switzerland, 2025.","chicago":"Reijers, Wessel. “Civic Vice in Digital Governance.” In <i>Public Governance and Emerging Technologies</i>. Cham: Springer Nature Switzerland, 2025. <a href=\"https://doi.org/10.1007/978-3-031-84748-6_14\">https://doi.org/10.1007/978-3-031-84748-6_14</a>.","bibtex":"@inbook{Reijers_2025, place={Cham}, title={Civic Vice in Digital Governance}, DOI={<a href=\"https://doi.org/10.1007/978-3-031-84748-6_14\">10.1007/978-3-031-84748-6_14</a>}, booktitle={Public Governance and Emerging Technologies}, publisher={Springer Nature Switzerland}, author={Reijers, Wessel}, year={2025} }","short":"W. Reijers, in: Public Governance and Emerging Technologies, Springer Nature Switzerland, Cham, 2025.","mla":"Reijers, Wessel. “Civic Vice in Digital Governance.” <i>Public Governance and Emerging Technologies</i>, Springer Nature Switzerland, 2025, doi:<a href=\"https://doi.org/10.1007/978-3-031-84748-6_14\">10.1007/978-3-031-84748-6_14</a>.","apa":"Reijers, W. (2025). Civic Vice in Digital Governance. In <i>Public Governance and Emerging Technologies</i>. Springer Nature Switzerland. 
<a href=\"https://doi.org/10.1007/978-3-031-84748-6_14\">https://doi.org/10.1007/978-3-031-84748-6_14</a>"},"year":"2025","place":"Cham","quality_controlled":"1","publication_identifier":{"isbn":["9783031847479","9783031847486"]},"publication_status":"published","doi":"10.1007/978-3-031-84748-6_14","title":"Civic Vice in Digital Governance","date_created":"2025-06-16T20:37:16Z","author":[{"full_name":"Reijers, Wessel","last_name":"Reijers","first_name":"Wessel"}],"date_updated":"2025-09-22T10:04:39Z","publisher":"Springer Nature Switzerland"},{"title":"Introduction to the Ethics of Emerging Technologies","date_updated":"2025-09-22T09:21:40Z","author":[{"first_name":"Wessel","full_name":"Reijers, Wessel","id":"102524","orcid":"0000-0003-2505-1587","last_name":"Reijers"},{"first_name":"Mark","full_name":"Thomas Young, Mark","last_name":"Thomas Young"},{"first_name":"Mark","full_name":"Coeckelbergh, Mark","last_name":"Coeckelbergh"}],"date_created":"2025-06-16T20:38:04Z","year":"2025","citation":{"apa":"Reijers, W., Thomas Young, M., &#38; Coeckelbergh, M. (2025). <i>Introduction to the Ethics of Emerging Technologies</i>.","mla":"Reijers, Wessel, et al. <i>Introduction to the Ethics of Emerging Technologies</i>. 2025.","short":"W. Reijers, M. Thomas Young, M. Coeckelbergh, Introduction to the Ethics of Emerging Technologies, 2025.","bibtex":"@book{Reijers_Thomas Young_Coeckelbergh_2025, title={Introduction to the Ethics of Emerging Technologies}, author={Reijers, Wessel and Thomas Young, Mark  and Coeckelbergh, Mark }, year={2025} }","ama":"Reijers W, Thomas Young M, Coeckelbergh M. <i>Introduction to the Ethics of Emerging Technologies</i>.; 2025.","ieee":"W. Reijers, M. Thomas Young, and M. Coeckelbergh, <i>Introduction to the Ethics of Emerging Technologies</i>. 2025.","chicago":"Reijers, Wessel, Mark  Thomas Young, and Mark  Coeckelbergh. 
<i>Introduction to the Ethics of Emerging Technologies</i>, 2025."},"language":[{"iso":"eng"}],"project":[{"_id":"370","name":"TRR 318; TP B06: Ethik und Normativität der erklärbaren KI"}],"_id":"60235","user_id":"102524","status":"public","type":"book"},{"project":[{"name":"TRR 318; TP B06: Ethik und Normativität der erklärbaren KI","_id":"370"}],"_id":"60233","user_id":"102524","language":[{"iso":"eng"}],"type":"journal_article","publication":"Regulation & Governance","abstract":[{"lang":"eng","text":"<jats:title>ABSTRACT</jats:title><jats:p>Emerging technologies pose many new challenges for regulation and governance on a global scale. With the advent of distributed communication networks like the Internet and decentralized ledger technologies like blockchain, new platforms emerged, disrupting existing power dynamics and bringing about new claims of sovereignty from the private sector. This special issue addresses a gap in the literature by focusing the discourse on the issue of <jats:italic>trust</jats:italic> and <jats:italic>confidence</jats:italic> in the digital realm. In particular, looking at the evolution of the web (from Web 1.0, to Web 2.0, and then Web 3), this article analyses how every iteration reflects a different way of dealing with the problem of <jats:italic>trust</jats:italic> online, resulting in a different regulation and governance landscape. Technology is often regarded as a new lever of regulation, attempting to resolve the problem of “trust” online, either through the introduction of a new trusted authority (Web 2.0) or through the introduction of technological guarantees that provide more assurance—or “confidence”—in the way interactions can be operationalized (Web 3). Yet, each of these technologies also introduce new risks and governance costs, ultimately shifting the problem of trust in a new direction rather than resolving it or removing the need for trust altogether. 
The main contribution of the articles in this special issue is providing a better understanding of the trust challenges faced and posed by emerging technologies and demonstrating how they affect institutional governance—in both theory and practice—with a view to help policymakers find appropriate answers to these challenges.</jats:p>"}],"status":"public","oa":"1","publisher":"Wiley","date_updated":"2025-09-22T10:08:25Z","author":[{"first_name":"Primavera","full_name":"de Filippi, Primavera","last_name":"de Filippi"},{"full_name":"Mannan, Morshed","last_name":"Mannan","first_name":"Morshed"},{"full_name":"Reijers, Wessel","last_name":"Reijers","first_name":"Wessel"}],"date_created":"2025-06-16T20:36:42Z","title":"How to Govern the Confidence Machine?","main_file_link":[{"url":"https://www.pure.ed.ac.uk/ws/portalfiles/portal/498727818/deFilippiEtal2025RGHowToGovern.pdf","open_access":"1"}],"doi":"10.1111/rego.70017","publication_status":"published","quality_controlled":"1","publication_identifier":{"issn":["1748-5983","1748-5991"]},"year":"2025","citation":{"apa":"de Filippi, P., Mannan, M., &#38; Reijers, W. (2025). How to Govern the Confidence Machine? <i>Regulation &#38; Governance</i>. <a href=\"https://doi.org/10.1111/rego.70017\">https://doi.org/10.1111/rego.70017</a>","bibtex":"@article{de Filippi_Mannan_Reijers_2025, title={How to Govern the Confidence Machine?}, DOI={<a href=\"https://doi.org/10.1111/rego.70017\">10.1111/rego.70017</a>}, journal={Regulation &#38; Governance}, publisher={Wiley}, author={de Filippi, Primavera and Mannan, Morshed and Reijers, Wessel}, year={2025} }","mla":"de Filippi, Primavera, et al. “How to Govern the Confidence Machine?” <i>Regulation &#38; Governance</i>, Wiley, 2025, doi:<a href=\"https://doi.org/10.1111/rego.70017\">10.1111/rego.70017</a>.","short":"P. de Filippi, M. Mannan, W. Reijers, Regulation &#38; Governance (2025).","chicago":"Filippi, Primavera de, Morshed Mannan, and Wessel Reijers. 
“How to Govern the Confidence Machine?” <i>Regulation &#38; Governance</i>, 2025. <a href=\"https://doi.org/10.1111/rego.70017\">https://doi.org/10.1111/rego.70017</a>.","ieee":"P. de Filippi, M. Mannan, and W. Reijers, “How to Govern the Confidence Machine?,” <i>Regulation &#38; Governance</i>, 2025, doi: <a href=\"https://doi.org/10.1111/rego.70017\">10.1111/rego.70017</a>.","ama":"de Filippi P, Mannan M, Reijers W. How to Govern the Confidence Machine? <i>Regulation &#38; Governance</i>. Published online 2025. doi:<a href=\"https://doi.org/10.1111/rego.70017\">10.1111/rego.70017</a>"}},{"citation":{"chicago":"Paaßen, Benjamin, Suzana Alpsancar, Tobias Matzner, and Ingrid Scharlau. “Healthy Distrust in AI Systems.” <i>ArXiv</i>, 2025.","ieee":"B. Paaßen, S. Alpsancar, T. Matzner, and I. Scharlau, “Healthy Distrust in AI systems,” <i>arXiv</i>. 2025.","ama":"Paaßen B, Alpsancar S, Matzner T, Scharlau I. Healthy Distrust in AI systems. <i>arXiv</i>. Published online 2025.","apa":"Paaßen, B., Alpsancar, S., Matzner, T., &#38; Scharlau, I. (2025). Healthy Distrust in AI systems. In <i>arXiv</i>.","short":"B. Paaßen, S. Alpsancar, T. Matzner, I. Scharlau, ArXiv (2025).","bibtex":"@article{Paaßen_Alpsancar_Matzner_Scharlau_2025, title={Healthy Distrust in AI systems}, journal={arXiv}, author={Paaßen, Benjamin and Alpsancar, Suzana and Matzner, Tobias and Scharlau, Ingrid}, year={2025} }","mla":"Paaßen, Benjamin, et al. 
“Healthy Distrust in AI Systems.” <i>ArXiv</i>, 2025."},"year":"2025","main_file_link":[{"open_access":"1","url":"https://arxiv.org/abs/2505.09747"}],"title":"Healthy Distrust in AI systems","date_created":"2025-05-16T09:39:13Z","author":[{"first_name":"Benjamin","full_name":"Paaßen, Benjamin","last_name":"Paaßen"},{"first_name":"Suzana","id":"93637","full_name":"Alpsancar, Suzana","last_name":"Alpsancar"},{"first_name":"Tobias","full_name":"Matzner, Tobias","id":"65695","last_name":"Matzner"},{"first_name":"Ingrid","full_name":"Scharlau, Ingrid","id":"451","last_name":"Scharlau","orcid":"0000-0003-2364-9489"}],"date_updated":"2025-11-18T09:38:01Z","oa":"1","status":"public","abstract":[{"lang":"eng","text":"Under the slogan of trustworthy AI, much of contemporary AI research is focused on designing AI systems and usage practices that inspire human trust and, thus, enhance adoption of AI systems. However, a person affected by an AI system may not be convinced by AI system design alone---neither should they, if the AI system is embedded in a social context that gives good reason to believe that it is used in tension with a person’s interest. In such cases,  distrust in the system may be justified and necessary to build meaningful trust in the first place. We propose the term \emph{healthy distrust} to describe such a justified, careful stance towards certain AI usage practices. 
We investigate prior notions of trust and distrust in computer science, sociology, history, psychology, and philosophy, outline a remaining gap that healthy distrust might fill and conceptualize healthy distrust as a crucial part for AI usage that respects human autonomy."}],"type":"preprint","publication":"arXiv","language":[{"iso":"eng"}],"user_id":"93637","department":[{"_id":"424"},{"_id":"26"},{"_id":"756"}],"project":[{"_id":"122","name":"TRR 318 - B3: TRR 318 - Subproject B3"},{"_id":"124","name":"TRR 318 - C1: TRR 318 - Subproject C1 - Gesundes Misstrauen in Erklärungen"},{"name":"TRR 318 - B06: TRR 318 - Teilprojekt B6 - Ethik und Normativität der erklärbaren KI","_id":"370"}],"_id":"59917"},{"title":"Explanation needs and ethical demands: unpacking the instrumental value of XAI","publisher":"Springer","date_created":"2024-12-02T08:32:00Z","year":"2025","language":[{"iso":"eng"}],"publication":"AI and Ethics","doi":"https://doi.org/10.1007/s43681-024-00622-3","main_file_link":[{"open_access":"1"}],"oa":"1","date_updated":"2025-11-25T21:27:44Z","volume":5,"author":[{"first_name":"Suzana","last_name":"Alpsancar","id":"93637","full_name":"Alpsancar, Suzana"},{"last_name":"Buhl","full_name":"Buhl, Heike M.","id":"27152","first_name":"Heike M."},{"last_name":"Matzner","id":"65695","full_name":"Matzner, Tobias","first_name":"Tobias"},{"first_name":"Ingrid","id":"451","full_name":"Scharlau, Ingrid","orcid":"0000-0003-2364-9489","last_name":"Scharlau"}],"intvolume":"         5","page":"3015–3033","citation":{"ieee":"S. Alpsancar, H. M. Buhl, T. Matzner, and I. Scharlau, “Explanation needs and ethical demands: unpacking the instrumental value of XAI,” <i>AI and Ethics</i>, vol. 5, pp. 3015–3033, 2025, doi: <a href=\"https://doi.org/10.1007/s43681-024-00622-3\">https://doi.org/10.1007/s43681-024-00622-3</a>.","chicago":"Alpsancar, Suzana, Heike M. Buhl, Tobias Matzner, and Ingrid Scharlau. 
“Explanation Needs and Ethical Demands: Unpacking the Instrumental Value of XAI.” <i>AI and Ethics</i> 5 (2025): 3015–3033. <a href=\"https://doi.org/10.1007/s43681-024-00622-3\">https://doi.org/10.1007/s43681-024-00622-3</a>.","ama":"Alpsancar S, Buhl HM, Matzner T, Scharlau I. Explanation needs and ethical demands: unpacking the instrumental value of XAI. <i>AI and Ethics</i>. 2025;5:3015–3033. doi:<a href=\"https://doi.org/10.1007/s43681-024-00622-3\">https://doi.org/10.1007/s43681-024-00622-3</a>","apa":"Alpsancar, S., Buhl, H. M., Matzner, T., &#38; Scharlau, I. (2025). Explanation needs and ethical demands: unpacking the instrumental value of XAI. <i>AI and Ethics</i>, <i>5</i>, 3015–3033. <a href=\"https://doi.org/10.1007/s43681-024-00622-3\">https://doi.org/10.1007/s43681-024-00622-3</a>","bibtex":"@article{Alpsancar_Buhl_Matzner_Scharlau_2025, title={Explanation needs and ethical demands: unpacking the instrumental value of XAI}, volume={5}, DOI={<a href=\"https://doi.org/10.1007/s43681-024-00622-3\">https://doi.org/10.1007/s43681-024-00622-3</a>}, journal={AI and Ethics}, publisher={Springer}, author={Alpsancar, Suzana and Buhl, Heike M. and Matzner, Tobias and Scharlau, Ingrid}, year={2025}, pages={3015–3033} }","mla":"Alpsancar, Suzana, et al. “Explanation Needs and Ethical Demands: Unpacking the Instrumental Value of XAI.” <i>AI and Ethics</i>, vol. 5, Springer, 2025, pp. 3015–3033, doi:<a href=\"https://doi.org/10.1007/s43681-024-00622-3\">https://doi.org/10.1007/s43681-024-00622-3</a>.","short":"S. Alpsancar, H.M. Buhl, T. Matzner, I. 
Scharlau, AI and Ethics 5 (2025) 3015–3033."},"publication_status":"published","related_material":{"link":[{"url":"https://links.springernature.com/f/a/xjbXcT06ufIgbHT1duGaHQ~~/AABE5gA~/RgRpMhXcP0SiaHR0cHM6Ly9saW5rLnNwcmluZ2VyLmNvbS8xMC4xMDA3L3M0MzY4MS0wMjQtMDA2MjItMz91dG1fc291cmNlPXJjdF9jb25ncmF0ZW1haWx0JnV0bV9tZWRpdW09ZW1haWwmdXRtX2NhbXBhaWduPW9hXzIwMjQxMjAzJnV0bV9jb250ZW50PTEwLjEwMDcvczQzNjgxLTAyNC0wMDYyMi0zVwNzcGNCCmdG3JBPZxsDc2FSIXN1emFuYS5hbHBzYW5jYXJAdW5pLXBhZGVyYm9ybi5kZVgEAAAHLA~~","relation":"confirmation"}]},"_id":"57531","project":[{"name":"TRR 318 - B06: TRR 318 - Teilprojekt B6 - Ethik und Normativität der erklärbaren KI","_id":"370"},{"name":"TRR 318 - A01: TRR 318 - Adaptives Erklären (Teilprojekt A01)","_id":"111"},{"_id":"114","name":"TRR 318 - A04: TRR 318 - Integration des technischen Modells in das Partnermodell bei der Erklärung von digitalen Artefakten (Teilprojekt A04)"}],"department":[{"_id":"756"},{"_id":"26"}],"user_id":"93637","status":"public","type":"journal_article"},{"language":[{"iso":"eng"}],"user_id":"93637","department":[{"_id":"26"},{"_id":"756"},{"_id":"660"}],"project":[{"_id":"370","name":"TRR 318; TP B06: Ethik und Normativität der erklärbaren KI"}],"_id":"62305","status":"public","editor":[{"first_name":"Mirko ","full_name":"Farina, Mirko ","last_name":"Farina"},{"first_name":"Xiao ","full_name":"Yu, Xiao ","last_name":"Yu"},{"full_name":"Chen, Jin","last_name":"Chen","first_name":"Jin"}],"type":"book_chapter","publication":"Digital Development. 
Technology, Ethics and Governance","doi":"10.4324/9781003567622-22","title":"Explainability and AI Governance","author":[{"orcid":"0000-0003-2505-1587","last_name":"Reijers","id":"102524","full_name":"Reijers, Wessel","first_name":"Wessel"},{"first_name":"Tobias","last_name":"Matzner","id":"65695","full_name":"Matzner, Tobias"},{"first_name":"Suzana","last_name":"Alpsancar","full_name":"Alpsancar, Suzana","id":"93637"}],"date_created":"2025-11-25T17:58:04Z","date_updated":"2025-11-25T21:25:31Z","publisher":"Routledge","citation":{"short":"W. Reijers, T. Matzner, S. Alpsancar, in: M. Farina, X. Yu, J. Chen (Eds.), Digital Development. Technology, Ethics and Governance, Routledge, New York, 2025.","mla":"Reijers, Wessel, et al. “Explainability and AI Governance.” <i>Digital Development. Technology, Ethics and Governance</i>, edited by Mirko  Farina et al., Routledge, 2025, doi:<a href=\"https://doi.org/10.4324/9781003567622-22\">10.4324/9781003567622-22</a>.","bibtex":"@inbook{Reijers_Matzner_Alpsancar_2025, place={New York}, title={Explainability and AI Governance}, DOI={<a href=\"https://doi.org/10.4324/9781003567622-22\">10.4324/9781003567622-22</a>}, booktitle={Digital Development. Technology, Ethics and Governance}, publisher={Routledge}, author={Reijers, Wessel and Matzner, Tobias and Alpsancar, Suzana}, editor={Farina, Mirko  and Yu, Xiao  and Chen, Jin}, year={2025} }","apa":"Reijers, W., Matzner, T., &#38; Alpsancar, S. (2025). Explainability and AI Governance. In M. Farina, X. Yu, &#38; J. Chen (Eds.), <i>Digital Development. Technology, Ethics and Governance</i>. Routledge. <a href=\"https://doi.org/10.4324/9781003567622-22\">https://doi.org/10.4324/9781003567622-22</a>","chicago":"Reijers, Wessel, Tobias Matzner, and Suzana Alpsancar. “Explainability and AI Governance.” In <i>Digital Development. Technology, Ethics and Governance</i>, edited by Mirko  Farina, Xiao  Yu, and Jin Chen. New York: Routledge, 2025. 
<a href=\"https://doi.org/10.4324/9781003567622-22\">https://doi.org/10.4324/9781003567622-22</a>.","ieee":"W. Reijers, T. Matzner, and S. Alpsancar, “Explainability and AI Governance,” in <i>Digital Development. Technology, Ethics and Governance</i>, M. Farina, X. Yu, and J. Chen, Eds. New York: Routledge, 2025.","ama":"Reijers W, Matzner T, Alpsancar S. Explainability and AI Governance. In: Farina M, Yu X, Chen J, eds. <i>Digital Development. Technology, Ethics and Governance</i>. Routledge; 2025. doi:<a href=\"https://doi.org/10.4324/9781003567622-22\">10.4324/9781003567622-22</a>"},"place":"New York","year":"2025","publication_status":"published","publication_identifier":{"isbn":["9781003567622"]}},{"type":"book_chapter","publication":" Philosophische Digitalisierungsforschung  Verantwortung, Verständigung, Vernunft, Macht","status":"public","editor":[{"full_name":"Adolphi, Rainer","last_name":"Adolphi","first_name":"Rainer"},{"first_name":"Suzana","last_name":"Alpsancar","full_name":"Alpsancar, Suzana"},{"full_name":"Hahn, Susanne","last_name":"Hahn","first_name":"Susanne"},{"first_name":"Matthias","last_name":"Kettner","full_name":"Kettner, Matthias"}],"user_id":"93637","department":[{"_id":"756"}],"project":[{"grant_number":"438445824","_id":"370","name":"TRR 318 - B06: TRR 318 - Teilprojekt B6 - Ethik und Normativität der erklärbaren KI"}],"_id":"55869","language":[{"iso":"eng"}],"quality_controlled":"1","citation":{"ama":"Alpsancar S. Warum und wozu erklärbare KI? Über die Verschiedenheit dreier paradigmatischer Zwecksetzungen. In: Adolphi R, Alpsancar S, Hahn S, Kettner M, eds. <i> Philosophische Digitalisierungsforschung  Verantwortung, Verständigung, Vernunft, Macht</i>. transcript; 2024:55-113.","ieee":"S. Alpsancar, “Warum und wozu erklärbare KI? Über die Verschiedenheit dreier paradigmatischer Zwecksetzungen,” in <i> Philosophische Digitalisierungsforschung  Verantwortung, Verständigung, Vernunft, Macht</i>, R. Adolphi, S. Alpsancar, S. Hahn, and M. 
Kettner, Eds. Bielefeld: transcript, 2024, pp. 55–113.","chicago":"Alpsancar, Suzana. “Warum Und Wozu Erklärbare KI? Über Die Verschiedenheit Dreier Paradigmatischer Zwecksetzungen.” In <i> Philosophische Digitalisierungsforschung  Verantwortung, Verständigung, Vernunft, Macht</i>, edited by Rainer Adolphi, Suzana Alpsancar, Susanne Hahn, and Matthias Kettner, 55–113. Bielefeld: transcript, 2024.","short":"S. Alpsancar, in: R. Adolphi, S. Alpsancar, S. Hahn, M. Kettner (Eds.),  Philosophische Digitalisierungsforschung  Verantwortung, Verständigung, Vernunft, Macht, transcript, Bielefeld, 2024, pp. 55–113.","mla":"Alpsancar, Suzana. “Warum Und Wozu Erklärbare KI? Über Die Verschiedenheit Dreier Paradigmatischer Zwecksetzungen.” <i> Philosophische Digitalisierungsforschung  Verantwortung, Verständigung, Vernunft, Macht</i>, edited by Rainer Adolphi et al., transcript, 2024, pp. 55–113.","bibtex":"@inbook{Alpsancar_2024, place={Bielefeld}, title={Warum und wozu erklärbare KI? Über die Verschiedenheit dreier paradigmatischer Zwecksetzungen}, booktitle={ Philosophische Digitalisierungsforschung  Verantwortung, Verständigung, Vernunft, Macht}, publisher={transcript}, author={Alpsancar, Suzana}, editor={Adolphi, Rainer and Alpsancar, Suzana and Hahn, Susanne and Kettner, Matthias}, year={2024}, pages={55–113} }","apa":"Alpsancar, S. (2024). Warum und wozu erklärbare KI? Über die Verschiedenheit dreier paradigmatischer Zwecksetzungen. In R. Adolphi, S. Alpsancar, S. Hahn, &#38; M. Kettner (Eds.), <i> Philosophische Digitalisierungsforschung  Verantwortung, Verständigung, Vernunft, Macht</i> (pp. 55–113). 
transcript."},"page":"55-113","year":"2024","place":"Bielefeld","date_created":"2024-08-28T18:50:46Z","author":[{"first_name":"Suzana","last_name":"Alpsancar","full_name":"Alpsancar, Suzana","id":"93637"}],"date_updated":"2024-08-28T18:51:44Z","oa":"1","publisher":"transcript","main_file_link":[{"url":"https://www.transcript-verlag.de/978-3-8376-7497-2/philosophische-digitalisierungsforschung/?number=978-3-8394-7497-6","open_access":"1"}],"title":"Warum und wozu erklärbare KI? Über die Verschiedenheit dreier paradigmatischer Zwecksetzungen"},{"citation":{"ama":"Reijers W, Matzner T, Alpsancar S, Philippi M. AI explainability, temporality, and civic virtue. In: <i>Smart Ethics in the Digital World: Proceedings of the ETHICOMP 2024. 21th International Conference on the Ethical and Social Impacts of ICT. Universidad de La Rioja, 2024.</i> ; 2024.","chicago":"Reijers, Wessel, Tobias Matzner, Suzana Alpsancar, and Martina Philippi. “AI Explainability, Temporality, and Civic Virtue.” In <i>Smart Ethics in the Digital World: Proceedings of the ETHICOMP 2024. 21th International Conference on the Ethical and Social Impacts of ICT. Universidad de La Rioja, 2024.</i> Longrono, 2024.","ieee":"W. Reijers, T. Matzner, S. Alpsancar, and M. Philippi, “AI explainability, temporality, and civic virtue,” 2024.","apa":"Reijers, W., Matzner, T., Alpsancar, S., &#38; Philippi, M. (2024). AI explainability, temporality, and civic virtue. <i>Smart Ethics in the Digital World: Proceedings of the ETHICOMP 2024. 21th International Conference on the Ethical and Social Impacts of ICT. Universidad de La Rioja, 2024.</i>","short":"W. Reijers, T. Matzner, S. Alpsancar, M. Philippi, in: Smart Ethics in the Digital World: Proceedings of the ETHICOMP 2024. 21th International Conference on the Ethical and Social Impacts of ICT. 
Universidad de La Rioja, 2024., Longrono, 2024.","bibtex":"@inproceedings{Reijers_Matzner_Alpsancar_Philippi_2024, place={Longrono}, title={AI explainability, temporality, and civic virtue}, booktitle={Smart Ethics in the Digital World: Proceedings of the ETHICOMP 2024. 21th International Conference on the Ethical and Social Impacts of ICT. Universidad de La Rioja, 2024.}, author={Reijers, Wessel and Matzner, Tobias and Alpsancar, Suzana and Philippi, Martina}, year={2024} }","mla":"Reijers, Wessel, et al. “AI Explainability, Temporality, and Civic Virtue.” <i>Smart Ethics in the Digital World: Proceedings of the ETHICOMP 2024. 21th International Conference on the Ethical and Social Impacts of ICT. Universidad de La Rioja, 2024.</i>, 2024."},"year":"2024","place":"Longrono","publication_status":"published","main_file_link":[{"url":"https://dialnet.unirioja.es/descarga/articulo/9326093.pdf","open_access":"1"}],"title":"AI explainability, temporality, and civic virtue","author":[{"first_name":"Wessel","orcid":"0000-0003-2505-1587","last_name":"Reijers","full_name":"Reijers, Wessel","id":"102524"},{"id":"65695","full_name":"Matzner, Tobias","last_name":"Matzner","first_name":"Tobias"},{"id":"93637","full_name":"Alpsancar, Suzana","last_name":"Alpsancar","first_name":"Suzana"},{"last_name":"Philippi","id":"100856","full_name":"Philippi, Martina","first_name":"Martina"}],"date_created":"2024-11-18T10:06:46Z","date_updated":"2024-12-17T11:44:41Z","oa":"1","status":"public","publication":"Smart Ethics in the Digital World: Proceedings of the ETHICOMP 2024. 21th International Conference on the Ethical and Social Impacts of ICT. 
Universidad de La Rioja, 2024.","type":"conference_abstract","language":[{"iso":"eng"}],"department":[{"_id":"756"}],"user_id":"93637","_id":"57172","project":[{"grant_number":"438445824","_id":"370","name":"TRR 318 - B06: TRR 318 - Teilprojekt B6 - Ethik und Normativität der erklärbaren KI"}]},{"main_file_link":[{"open_access":"1","url":"https://dialnet.unirioja.es/descarga/articulo/9326091.pdf"}],"title":"Unpacking the purposes of explainable AI","author":[{"last_name":"Alpsancar","id":"93637","full_name":"Alpsancar, Suzana","first_name":"Suzana"},{"first_name":"Tobias","full_name":"Matzner, Tobias","last_name":"Matzner"},{"full_name":"Philippi, Martina","last_name":"Philippi","first_name":"Martina"}],"date_created":"2024-09-23T19:17:41Z","date_updated":"2024-12-17T11:46:27Z","publisher":"Universidad de La Rioja","oa":"1","page":"31-35","citation":{"short":"S. Alpsancar, T. Matzner, M. Philippi, in: Smart Ethics in the Digital World: Proceedings of the ETHICOMP 2024. 21th International Conference on the Ethical and Social Impacts of ICT, Universidad de La Rioja, 2024, pp. 31–35.","bibtex":"@inproceedings{Alpsancar_Matzner_Philippi_2024, title={Unpacking the purposes of explainable AI}, booktitle={Smart Ethics in the Digital World: Proceedings of the ETHICOMP 2024. 21th International Conference on the Ethical and Social Impacts of ICT}, publisher={Universidad de La Rioja}, author={Alpsancar, Suzana and Matzner, Tobias and Philippi, Martina}, year={2024}, pages={31–35} }","mla":"Alpsancar, Suzana, et al. “Unpacking the Purposes of Explainable AI.” <i>Smart Ethics in the Digital World: Proceedings of the ETHICOMP 2024. 21th International Conference on the Ethical and Social Impacts of ICT</i>, Universidad de La Rioja, 2024, pp. 31–35.","apa":"Alpsancar, S., Matzner, T., &#38; Philippi, M. (2024). Unpacking the purposes of explainable AI. <i>Smart Ethics in the Digital World: Proceedings of the ETHICOMP 2024. 
21th International Conference on the Ethical and Social Impacts of ICT</i>, 31–35.","ama":"Alpsancar S, Matzner T, Philippi M. Unpacking the purposes of explainable AI. In: <i>Smart Ethics in the Digital World: Proceedings of the ETHICOMP 2024. 21th International Conference on the Ethical and Social Impacts of ICT</i>. Universidad de La Rioja; 2024:31-35.","chicago":"Alpsancar, Suzana, Tobias Matzner, and Martina Philippi. “Unpacking the Purposes of Explainable AI.” In <i>Smart Ethics in the Digital World: Proceedings of the ETHICOMP 2024. 21th International Conference on the Ethical and Social Impacts of ICT</i>, 31–35. Universidad de La Rioja, 2024.","ieee":"S. Alpsancar, T. Matzner, and M. Philippi, “Unpacking the purposes of explainable AI,” in <i>Smart Ethics in the Digital World: Proceedings of the ETHICOMP 2024. 21th International Conference on the Ethical and Social Impacts of ICT</i>, 2024, pp. 31–35."},"year":"2024","language":[{"iso":"eng"}],"department":[{"_id":"756"}],"user_id":"93637","_id":"56217","project":[{"grant_number":"438445824","_id":"370","name":"TRR 318 - B06: TRR 318 - Teilprojekt B6 - Ethik und Normativität der erklärbaren KI"}],"status":"public","publication":"Smart Ethics in the Digital World: Proceedings of the ETHICOMP 2024. 21th International Conference on the Ethical and Social Impacts of ICT","type":"conference_abstract"},{"date_created":"2025-07-29T14:29:48Z","author":[{"full_name":"Philippi, Martina","id":"100856","last_name":"Philippi","first_name":"Martina"}],"date_updated":"2025-07-29T15:10:00Z","conference":{"location":"ÖAW Wien","end_date":"2024-06-04","start_date":"2024-06-03","name":"TA24: Methoden für die Technikfolgenabschätzung – im Spannungsfeld zwischen bewährter Praxis und neuen Möglichkeiten"},"title":"Von der Kunst, die richtigen Fragen zu stellen. 
Das Potential der Phänomenologie für die Technikfolgenabschätzung","related_material":{"link":[{"url":"https://www.oeaw.ac.at/ita/veranstaltungen/ta24-konferenz","relation":"other"}]},"citation":{"bibtex":"@inproceedings{Philippi_2024, title={Von der Kunst, die richtigen Fragen zu stellen. Das Potential der Phänomenologie für die Technikfolgenabschätzung}, booktitle={TA24}, author={Philippi, Martina}, year={2024} }","mla":"Philippi, Martina. “Von Der Kunst, Die Richtigen Fragen Zu Stellen. Das Potential Der Phänomenologie Für Die Technikfolgenabschätzung.” <i>TA24</i>, 2024.","short":"M. Philippi, in: TA24, 2024.","apa":"Philippi, M. (2024). Von der Kunst, die richtigen Fragen zu stellen. Das Potential der Phänomenologie für die Technikfolgenabschätzung. <i>TA24</i>. TA24: Methoden für die Technikfolgenabschätzung – im Spannungsfeld zwischen bewährter Praxis und neuen Möglichkeiten, ÖAW Wien.","ama":"Philippi M. Von der Kunst, die richtigen Fragen zu stellen. Das Potential der Phänomenologie für die Technikfolgenabschätzung. In: <i>TA24</i>. ; 2024.","chicago":"Philippi, Martina. “Von Der Kunst, Die Richtigen Fragen Zu Stellen. Das Potential Der Phänomenologie Für Die Technikfolgenabschätzung.” In <i>TA24</i>, 2024.","ieee":"M. Philippi, “Von der Kunst, die richtigen Fragen zu stellen. Das Potential der Phänomenologie für die Technikfolgenabschätzung,” presented at the TA24: Methoden für die Technikfolgenabschätzung – im Spannungsfeld zwischen bewährter Praxis und neuen Möglichkeiten, ÖAW Wien, 2024."},"year":"2024","user_id":"100856","_id":"60826","project":[{"name":"TRR 318 - B06: TRR 318 - Teilprojekt B6 - Ethik und Normativität der erklärbaren KI","_id":"370","grant_number":"438445824"}],"language":[{"iso":"eng"}],"publication":"TA24","type":"conference_abstract","status":"public"},{"related_material":{"link":[{"url":"https://www.easst4s2024.net/","relation":"other"}]},"citation":{"ieee":"M. Philippi and D. Mindlin, “Dealing responsibly with tacit assumptions. 
An interdisciplinary approach to the integration of ethical reflexion into user modeling,” presented at the EASST-4S 2024: Making and doing transformations, VU Amsterdam, 2024.","chicago":"Philippi, Martina, and Dimitry Mindlin. “Dealing Responsibly with Tacit Assumptions. An Interdisciplinary Approach to the Integration of Ethical Reflexion into User Modeling.” In <i>EASST-4S</i>, 2024.","ama":"Philippi M, Mindlin D. Dealing responsibly with tacit assumptions. An interdisciplinary approach to the integration of ethical reflexion into user modeling. In: <i>EASST-4S</i>. ; 2024.","apa":"Philippi, M., &#38; Mindlin, D. (2024). Dealing responsibly with tacit assumptions. An interdisciplinary approach to the integration of ethical reflexion into user modeling. <i>EASST-4S</i>. EASST-4S 2024: Making and doing transformations, VU Amsterdam.","bibtex":"@inproceedings{Philippi_Mindlin_2024, title={Dealing responsibly with tacit assumptions. An interdisciplinary approach to the integration of ethical reflexion into user modeling}, booktitle={EASST-4S}, author={Philippi, Martina and Mindlin, Dimitry}, year={2024} }","mla":"Philippi, Martina, and Dimitry Mindlin. “Dealing Responsibly with Tacit Assumptions. An Interdisciplinary Approach to the Integration of Ethical Reflexion into User Modeling.” <i>EASST-4S</i>, 2024.","short":"M. Philippi, D. Mindlin, in: EASST-4S, 2024."},"year":"2024","date_created":"2025-07-29T14:25:14Z","author":[{"last_name":"Philippi","full_name":"Philippi, Martina","id":"100856","first_name":"Martina"},{"last_name":"Mindlin","full_name":"Mindlin, Dimitry","first_name":"Dimitry"}],"date_updated":"2025-07-29T15:09:55Z","conference":{"name":"EASST-4S 2024: Making and doing transformations","start_date":"2024-07-16","end_date":"2024-07-19","location":"VU Amsterdam"},"title":"Dealing responsibly with tacit assumptions. 
An interdisciplinary approach to the integration of ethical reflexion into user modeling","publication":"EASST-4S","type":"conference_abstract","status":"public","user_id":"100856","_id":"60825","project":[{"name":"TRR 318 - B06: TRR 318 - Teilprojekt B6 - Ethik und Normativität der erklärbaren KI","_id":"370","grant_number":"438445824"}],"language":[{"iso":"eng"}]},{"language":[{"iso":"eng"}],"user_id":"100856","_id":"60824","project":[{"grant_number":"438445824","_id":"370","name":"TRR 318 - B06: TRR 318 - Teilprojekt B6 - Ethik und Normativität der erklärbaren KI"}],"status":"public","publication":"fpet2024","type":"conference_abstract","conference":{"start_date":"2024-09-17","name":"fpet (Forum on Philosophy, Engineering, and Technology) 2024","location":"ZKM Karlsruhe","end_date":"2024-09-19"},"title":"How to address ethical problems in a multi-perspective context: Interdisciplinary challenges of XAI","author":[{"last_name":"Philippi","id":"100856","full_name":"Philippi, Martina","first_name":"Martina"}],"date_created":"2025-07-29T14:20:44Z","date_updated":"2025-07-29T15:10:26Z","citation":{"chicago":"Philippi, Martina. “How to Address Ethical Problems in a Multi-Perspective Context: Interdisciplinary Challenges of XAI.” In <i>Fpet2024</i>, 2024.","ieee":"M. Philippi, “How to address ethical problems in a multi-perspective context: Interdisciplinary challenges of XAI,” presented at the fpet (Forum on Philosophy, Engineering, and Technology) 2024, ZKM Karlsruhe, 2024.","short":"M. Philippi, in: Fpet2024, 2024.","bibtex":"@inproceedings{Philippi_2024, title={How to address ethical problems in a multi-perspective context: Interdisciplinary challenges of XAI}, booktitle={fpet2024}, author={Philippi, Martina}, year={2024} }","mla":"Philippi, Martina. “How to Address Ethical Problems in a Multi-Perspective Context: Interdisciplinary Challenges of XAI.” <i>Fpet2024</i>, 2024.","ama":"Philippi M. 
How to address ethical problems in a multi-perspective context: Interdisciplinary challenges of XAI. In: <i>Fpet2024</i>. ; 2024.","apa":"Philippi, M. (2024). How to address ethical problems in a multi-perspective context: Interdisciplinary challenges of XAI. <i>Fpet2024</i>. fpet (Forum on Philosophy, Engineering, and Technology) 2024, ZKM Karlsruhe."},"year":"2024","related_material":{"link":[{"relation":"other","url":"https://www.fpet2024.org/"}]}},{"user_id":"100856","project":[{"grant_number":"438445824","name":"TRR 318 - B06: TRR 318 - Teilprojekt B6 - Ethik und Normativität der erklärbaren KI","_id":"370"}],"_id":"60829","language":[{"iso":"eng"}],"type":"conference_abstract","status":"public","date_created":"2025-07-29T14:43:32Z","author":[{"last_name":"Philippi","id":"100856","full_name":"Philippi, Martina","first_name":"Martina"},{"full_name":"Reijers, Wessel","id":"102524","last_name":"Reijers","orcid":"0000-0003-2505-1587","first_name":"Wessel"}],"date_updated":"2025-07-29T15:04:32Z","conference":{"location":"Paderborn","end_date":"2024-05-17","start_date":"2024-05-15","name":"Ethics and Normativity of Explainable AI"},"title":"Ethics of Explainable AI","related_material":{"link":[{"url":"https://trr318.uni-paderborn.de/workshops/2024/ethics-and-normativity-of-explainable-ai-workshop","relation":"other"}]},"citation":{"apa":"Philippi, M., &#38; Reijers, W. (2024). <i>Ethics of Explainable AI</i>. Ethics and Normativity of Explainable AI, Paderborn.","short":"M. Philippi, W. Reijers, in: 2024.","mla":"Philippi, Martina, and Wessel Reijers. <i>Ethics of Explainable AI</i>. 2024.","bibtex":"@inproceedings{Philippi_Reijers_2024, title={Ethics of Explainable AI}, author={Philippi, Martina and Reijers, Wessel}, year={2024} }","chicago":"Philippi, Martina, and Wessel Reijers. “Ethics of Explainable AI,” 2024.","ieee":"M. Philippi and W. 
Reijers, “Ethics of Explainable AI,” presented at the Ethics and Normativity of Explainable AI, Paderborn, 2024.","ama":"Philippi M, Reijers W. Ethics of Explainable AI. In: ; 2024."},"year":"2024"},{"type":"conference_abstract","status":"public","project":[{"name":"TRR 318 - B06: TRR 318 - Teilprojekt B6 - Ethik und Normativität der erklärbaren KI","_id":"370","grant_number":"438445824"}],"_id":"60827","user_id":"100856","language":[{"iso":"eng"}],"year":"2024","citation":{"apa":"Philippi, M. (2024). <i>Dual-use potential in humanitarian UAVs</i>. Human-machine learning? Interaction with deadly machines, Paderborn.","short":"M. Philippi, in: 2024.","bibtex":"@inproceedings{Philippi_2024, title={Dual-use potential in humanitarian UAVs}, author={Philippi, Martina}, year={2024} }","mla":"Philippi, Martina. <i>Dual-Use Potential in Humanitarian UAVs</i>. 2024.","chicago":"Philippi, Martina. “Dual-Use Potential in Humanitarian UAVs,” 2024.","ieee":"M. Philippi, “Dual-use potential in humanitarian UAVs,” presented at the Human-machine learning? Interaction with deadly machines, Paderborn, 2024.","ama":"Philippi M. Dual-use potential in humanitarian UAVs. In: ; 2024."},"date_updated":"2025-07-29T15:08:21Z","date_created":"2025-07-29T14:34:00Z","author":[{"id":"100856","full_name":"Philippi, Martina","last_name":"Philippi","first_name":"Martina"}],"title":"Dual-use potential in humanitarian UAVs","conference":{"start_date":"2024-01-30","name":"Human-machine learning? Interaction with deadly machines","location":"Paderborn"}},{"date_created":"2025-07-29T14:50:34Z","author":[{"id":"100856","full_name":"Philippi, Martina","last_name":"Philippi","first_name":"Martina"}],"date_updated":"2025-07-29T15:04:12Z","title":"Interdisciplinary challenges for XAI ethics and the potential of the phenomenological approach. Gastvortrag am Eindhoven Center for the Philosophy of AI (ECPAI), TU Eindhoven, 25. Juni 2024.","citation":{"ieee":"M. 
Philippi, <i>Interdisciplinary challenges for XAI ethics and the potential of the phenomenological approach. Gastvortrag am Eindhoven Center for the Philosophy of AI (ECPAI), TU Eindhoven, 25. Juni 2024.</i> 2024.","chicago":"Philippi, Martina. <i>Interdisciplinary Challenges for XAI Ethics and the Potential of the Phenomenological Approach. Gastvortrag Am Eindhoven Center for the Philosophy of AI (ECPAI), TU Eindhoven, 25. Juni 2024.</i>, 2024.","ama":"Philippi M. <i>Interdisciplinary Challenges for XAI Ethics and the Potential of the Phenomenological Approach. Gastvortrag Am Eindhoven Center for the Philosophy of AI (ECPAI), TU Eindhoven, 25. Juni 2024.</i>; 2024.","apa":"Philippi, M. (2024). <i>Interdisciplinary challenges for XAI ethics and the potential of the phenomenological approach. Gastvortrag am Eindhoven Center for the Philosophy of AI (ECPAI), TU Eindhoven, 25. Juni 2024.</i>","short":"M. Philippi, Interdisciplinary Challenges for XAI Ethics and the Potential of the Phenomenological Approach. Gastvortrag Am Eindhoven Center for the Philosophy of AI (ECPAI), TU Eindhoven, 25. Juni 2024., 2024.","mla":"Philippi, Martina. <i>Interdisciplinary Challenges for XAI Ethics and the Potential of the Phenomenological Approach. Gastvortrag Am Eindhoven Center for the Philosophy of AI (ECPAI), TU Eindhoven, 25. Juni 2024.</i> 2024.","bibtex":"@book{Philippi_2024, title={Interdisciplinary challenges for XAI ethics and the potential of the phenomenological approach. Gastvortrag am Eindhoven Center for the Philosophy of AI (ECPAI), TU Eindhoven, 25. Juni 2024.}, author={Philippi, Martina}, year={2024} }"},"year":"2024","user_id":"100856","project":[{"name":"TRR 318 - B06: TRR 318 - Teilprojekt B6 - Ethik und Normativität der erklärbaren KI","_id":"370","grant_number":"438445824"}],"_id":"60831","language":[{"iso":"eng"}],"type":"misc","status":"public"}]
