[{"language":[{"iso":"eng"}],"department":[{"_id":"26"},{"_id":"756"}],"user_id":"93637","_id":"65061","project":[{"_id":"109","name":"TRR 318: Erklärbarkeit konstruieren"},{"name":"TRR 318; TP B06: Ethik und Normativität der erklärbaren KI","_id":"370"}],"status":"public","abstract":[{"text":"<jats:title>Abstract</jats:title>\r\n                  <jats:p>\r\n                    One of the purposes for which XAI is often brought into play is to enable a user to act responsibly. However, responsibility is a complex normative and social phenomenon that we unfold in this chapter. We consider that the classical concepts of agency and responsibility do not fully capture what is needed for meaningful collaboration between human users and XAI. Advocating the perspective of sXAI, we argue that the growing adaptivity of AI systems will result in sXAI being considered as partners. Both partners adopt particular (dialogical) roles within a collaborative process and take responsibility for them. We expect that these roles lead to reactive attitudes toward the sXAI on the side of the human partners that make these roles relational. They resemble those reactive attitudes that we hold toward other human agents. For agents to exercise their responsibility, they need to possess agential capacities to fulfill their role with respect to the structure of a social interaction. Hence, sXAI can be expected to act responsibly. But because of XAI’s limited normative capacities, it might rather act as a marginal agent. We refer to marginal agents and show they can be scaffolded with regard to their agential capacities and their knowledge about the structure of a social interaction. The structure links the actions of the partners to each other in terms of a set of stimuli and responses to it in pursuit of a particular goal. Hence, it is important to differentiate between the different goals that a structure can impose for exercising responsibility. Therefore, we follow (Responsibility from the margins. Oxford University Press; 2015.\r\n                    <jats:ext-link xmlns:xlink=\"http://www.w3.org/1999/xlink\" xlink:href=\"https://doi.org/10.1093/acprof:oso/9780198715672.24001.0001\" ext-link-type=\"uri\">https://doi.org/10.1093/acprof:oso/9780198715672.24001.0001</jats:ext-link>\r\n                    ) and offer three structures that can help to organize responsibility for\r\n                    <jats:italic>decisions made</jats:italic>\r\n                    with the assistance of AI systems. These structures are attributability, answerability, and accountability. Our insights will inform the development and design process of XAI to meet the guiding principles of responsible research and innovation as well as trustworthy AI.\r\n                  </jats:p>","lang":"eng"}],"publication":"Social Explainable AI","type":"book_chapter","doi":"10.1007/978-981-96-5290-7_9","main_file_link":[{"url":"https://doi.org/10.1007/978-981-96-5290-7_9","open_access":"1"}],"title":"Responsibilities in sXAI","author":[{"first_name":"Katharina J.","orcid":"0000-0002-5676-8233","last_name":"Rohlfing","id":"50352","full_name":"Rohlfing, Katharina J."},{"id":"93637","full_name":"Alpsancar, Suzana","last_name":"Alpsancar","first_name":"Suzana"},{"first_name":"Carsten","last_name":"Schulte","full_name":"Schulte, Carsten","id":"60311"}],"date_created":"2026-03-19T10:59:18Z","oa":"1","date_updated":"2026-03-19T11:53:01Z","publisher":"Springer Nature Singapore","page":"157-177","citation":{"mla":"Rohlfing, Katharina J., et al. 
“Responsibilities in SXAI.” <i>Social Explainable AI</i>, Springer Nature Singapore, 2026, pp. 157–77, doi:<a href=\"https://doi.org/10.1007/978-981-96-5290-7_9\">10.1007/978-981-96-5290-7_9</a>.","bibtex":"@inbook{Rohlfing_Alpsancar_Schulte_2026, place={Singapore}, title={Responsibilities in sXAI}, DOI={<a href=\"https://doi.org/10.1007/978-981-96-5290-7_9\">10.1007/978-981-96-5290-7_9</a>}, booktitle={Social Explainable AI}, publisher={Springer Nature Singapore}, author={Rohlfing, Katharina J. and Alpsancar, Suzana and Schulte, Carsten}, year={2026}, pages={157–177} }","short":"K.J. Rohlfing, S. Alpsancar, C. Schulte, in: Social Explainable AI, Springer Nature Singapore, Singapore, 2026, pp. 157–177.","apa":"Rohlfing, K. J., Alpsancar, S., &#38; Schulte, C. (2026). Responsibilities in sXAI. In <i>Social Explainable AI</i> (pp. 157–177). Springer Nature Singapore. <a href=\"https://doi.org/10.1007/978-981-96-5290-7_9\">https://doi.org/10.1007/978-981-96-5290-7_9</a>","chicago":"Rohlfing, Katharina J., Suzana Alpsancar, and Carsten Schulte. “Responsibilities in SXAI.” In <i>Social Explainable AI</i>, 157–77. Singapore: Springer Nature Singapore, 2026. <a href=\"https://doi.org/10.1007/978-981-96-5290-7_9\">https://doi.org/10.1007/978-981-96-5290-7_9</a>.","ieee":"K. J. Rohlfing, S. Alpsancar, and C. Schulte, “Responsibilities in sXAI,” in <i>Social Explainable AI</i>, Singapore: Springer Nature Singapore, 2026, pp. 157–177.","ama":"Rohlfing KJ, Alpsancar S, Schulte C. Responsibilities in sXAI. In: <i>Social Explainable AI</i>. Springer Nature Singapore; 2026:157-177. doi:<a href=\"https://doi.org/10.1007/978-981-96-5290-7_9\">10.1007/978-981-96-5290-7_9</a>"},"place":"Singapore","year":"2026","publication_identifier":{"isbn":["9789819652891","9789819652907"]},"publication_status":"published"},{"status":"public","type":"book_chapter","project":[{"_id":"370","name":"TRR 318; TP B06: Ethik und Normativität der erklärbaren KI"}],"_id":"65063","user_id":"93637","department":[{"_id":"26"},{"_id":"756"}],"place":"Singapore","citation":{"chicago":"Alpsancar, Suzana, and Eugenia Stamboliev. “Tasking AI Fairly. How to Empower AI Practitioners With SXAI?” In <i>Social Explainable AI</i>, 557–81. Singapore: Springer Nature Singapore, 2026. <a href=\"https://doi.org/10.1007/978-981-96-5290-7_29\">https://doi.org/10.1007/978-981-96-5290-7_29</a>.","ieee":"S. Alpsancar and E. Stamboliev, “Tasking AI Fairly. How to Empower AI Practitioners With sXAI?,” in <i>Social Explainable AI</i>, Singapore: Springer Nature Singapore, 2026, pp. 557–581.","ama":"Alpsancar S, Stamboliev E. Tasking AI Fairly. How to Empower AI Practitioners With sXAI? In: <i>Social Explainable AI</i>. Springer Nature Singapore; 2026:557-581. doi:<a href=\"https://doi.org/10.1007/978-981-96-5290-7_29\">10.1007/978-981-96-5290-7_29</a>","apa":"Alpsancar, S., &#38; Stamboliev, E. (2026). Tasking AI Fairly. How to Empower AI Practitioners With sXAI? In <i>Social Explainable AI</i> (pp. 557–581). Springer Nature Singapore. <a href=\"https://doi.org/10.1007/978-981-96-5290-7_29\">https://doi.org/10.1007/978-981-96-5290-7_29</a>","short":"S. Alpsancar, E. Stamboliev, in: Social Explainable AI, Springer Nature Singapore, Singapore, 2026, pp. 557–581.","bibtex":"@inbook{Alpsancar_Stamboliev_2026, place={Singapore}, title={Tasking AI Fairly. 
How to Empower AI Practitioners With sXAI?}, DOI={<a href=\"https://doi.org/10.1007/978-981-96-5290-7_29\">10.1007/978-981-96-5290-7_29</a>}, booktitle={Social Explainable AI}, publisher={Springer Nature Singapore}, author={Alpsancar, Suzana and Stamboliev, Eugenia}, year={2026}, pages={557–581} }","mla":"Alpsancar, Suzana, and Eugenia Stamboliev. “Tasking AI Fairly. How to Empower AI Practitioners With SXAI?” <i>Social Explainable AI</i>, Springer Nature Singapore, 2026, pp. 557–81, doi:<a href=\"https://doi.org/10.1007/978-981-96-5290-7_29\">10.1007/978-981-96-5290-7_29</a>."},"page":"557-581","publication_status":"published","publication_identifier":{"isbn":["9789819652891","9789819652907"]},"main_file_link":[{"url":"https://doi.org/10.1007/978-981-96-5290-7_29","open_access":"1"}],"doi":"10.1007/978-981-96-5290-7_29","date_updated":"2026-03-19T11:53:42Z","oa":"1","author":[{"full_name":"Alpsancar, Suzana","id":"93637","last_name":"Alpsancar","first_name":"Suzana"},{"first_name":"Eugenia","last_name":"Stamboliev","full_name":"Stamboliev, Eugenia"}],"abstract":[{"lang":"eng","text":"<jats:title>Abstract</jats:title>\r\n                  <jats:p>\r\n                    This chapter critically examines how social explainable AI (sXAI) can better support AI practitioners in ensuring fairness in AI-based decision-making. We argue for a fundamental shift: Fairness should be understood not as a technical property or an information problem, but as a matter of vulnerability—focusing on the real-world impacts of AI on individuals and groups, especially those most at risk. Hereby, we call for a shift in perspective: from fair AI to\r\n                    <jats:italic>tasking AI fairly</jats:italic>\r\n                    . To motivate our vulnerability approach, we review the “Dutch welfare fraud scandal” (system risk indication—SyRI) and current challenges in the field of fair AI/machine learning (ML). Vulnerability of a person or members of a definable group of persons is a complex relational notion, and not a technical property of a technical system. Accordingly, we suggest several nontechnical strategies that hold the promise to compensate for the insufficiency of purely technical approaches to fairness and other ethical issues in the practical use of AI-based systems. To discuss how sXAI, due to its interactive and adaptive social character, might better fulfill this role than current XAI techniques, we provide a toy scenario for how sXAI might support the virtuous AI practitioner in an ethical inquiry. Finally, we also address challenges and limits of our approach.\r\n                  </jats:p>"}],"publication":"Social Explainable AI","language":[{"iso":"eng"}],"year":"2026","title":"Tasking AI Fairly. How to Empower AI Practitioners With sXAI?","publisher":"Springer Nature Singapore","date_created":"2026-03-19T11:03:30Z"},{"citation":{"ama":"Alpsancar S, Klenk M. The Risk of Manipulation and Deception in sXAI. In: <i>Social Explainable AI</i>. Springer Nature Singapore; 2026:583-616. doi:<a href=\"https://doi.org/10.1007/978-981-96-5290-7_30\">10.1007/978-981-96-5290-7_30</a>","chicago":"Alpsancar, Suzana, and Michael Klenk. “The Risk of Manipulation and Deception in SXAI.” In <i>Social Explainable AI</i>, 583–616. Singapore: Springer Nature Singapore, 2026. <a href=\"https://doi.org/10.1007/978-981-96-5290-7_30\">https://doi.org/10.1007/978-981-96-5290-7_30</a>.","ieee":"S. Alpsancar and M. 
Klenk, “The Risk of Manipulation and Deception in sXAI,” in <i>Social Explainable AI</i>, Singapore: Springer Nature Singapore, 2026, pp. 583–616.","apa":"Alpsancar, S., &#38; Klenk, M. (2026). The Risk of Manipulation and Deception in sXAI. In <i>Social Explainable AI</i> (pp. 583–616). Springer Nature Singapore. <a href=\"https://doi.org/10.1007/978-981-96-5290-7_30\">https://doi.org/10.1007/978-981-96-5290-7_30</a>","short":"S. Alpsancar, M. Klenk, in: Social Explainable AI, Springer Nature Singapore, Singapore, 2026, pp. 583–616.","bibtex":"@inbook{Alpsancar_Klenk_2026, place={Singapore}, title={The Risk of Manipulation and Deception in sXAI}, DOI={<a href=\"https://doi.org/10.1007/978-981-96-5290-7_30\">10.1007/978-981-96-5290-7_30</a>}, booktitle={Social Explainable AI}, publisher={Springer Nature Singapore}, author={Alpsancar, Suzana and Klenk, Michael}, year={2026}, pages={583–616} }","mla":"Alpsancar, Suzana, and Michael Klenk. “The Risk of Manipulation and Deception in SXAI.” <i>Social Explainable AI</i>, Springer Nature Singapore, 2026, pp. 583–616, doi:<a href=\"https://doi.org/10.1007/978-981-96-5290-7_30\">10.1007/978-981-96-5290-7_30</a>."},"page":"583-616","place":"Singapore","year":"2026","publication_status":"published","publication_identifier":{"isbn":["9789819652891","9789819652907"]},"main_file_link":[{"url":"https://doi.org/10.1007/978-981-96-5290-7_30","open_access":"1"}],"doi":"10.1007/978-981-96-5290-7_30","title":"The Risk of Manipulation and Deception in sXAI","author":[{"first_name":"Suzana","full_name":"Alpsancar, Suzana","id":"93637","last_name":"Alpsancar"},{"first_name":"Michael","full_name":"Klenk, Michael","last_name":"Klenk"}],"date_created":"2026-03-19T11:05:30Z","oa":"1","date_updated":"2026-03-19T11:52:00Z","publisher":"Springer Nature Singapore","status":"public","abstract":[{"text":"<jats:title>Abstract</jats:title>\r\n                  <jats:p>XAI can minimize the risks of being manipulated and deceived by AI but in turn entails other specific risks. This also applies to sXAI, and the specifically social character of sXAI harbors particular risks that designers and developers should be aware of. In this chapter, we shall discuss the potential opportunities and risks of sXAI. We see a particularly positive potential in the social character of sXAI, which lies in the fact that skillful users, including those with “healthy distrust,” can use the adaptivity of sXAI to produce an explanation that is actually relevant and adequate for them. However, this requires a high level of skills on the part of the user and is thus in contrast to the general promise of efficiency in the use of AI. 
A potential risk of XAI is that it can be (even more) persuasive, as the interactive involvement and the anthropomorphism strengthen a trustworthy appearance/performance (independent of the adequacy of the sXAI performance).</jats:p>","lang":"eng"}],"type":"book_chapter","publication":"Social Explainable AI","language":[{"iso":"eng"}],"user_id":"93637","department":[{"_id":"26"},{"_id":"756"}],"project":[{"name":"TRR 318: Erklärbarkeit konstruieren","_id":"109"},{"name":"TRR 318; TP B06: Ethik und Normativität der erklärbaren KI","_id":"370"}],"_id":"65064"},{"language":[{"iso":"eng"}],"project":[{"_id":"109","name":"TRR 318: Erklärbarkeit konstruieren"}],"_id":"65065","user_id":"93637","department":[{"_id":"26"},{"_id":"756"}],"editor":[{"first_name":"Katharina J.","last_name":"Rohlfing","orcid":"0000-0002-5676-8233","id":"50352","full_name":"Rohlfing, Katharina J."},{"first_name":"Kary","last_name":"Främling","full_name":"Främling, Kary"},{"full_name":"Lim, Brian","last_name":"Lim","first_name":"Brian"},{"first_name":"Suzana","full_name":"Alpsancar, Suzana","id":"93637","last_name":"Alpsancar"},{"last_name":"Thommes","id":"72497","full_name":"Thommes, Kirsten","first_name":"Kirsten"}],"abstract":[{"text":"<jats:title>Abstract</jats:title>\r\n                  <jats:p>This introduction sets the stage for the present book. Whereas research in eXplainable AI (XAI) is motivated by societal changes and values, technology development largely ignores social aspects. This book aims to address this research gap with a systematic and comprehensive social view on explainable AI. Besides introducing many relevant concepts, the book offers first access to their possible implementation, thus advancing the development of more social XAI. The introduction starts by connecting the topic to the general research field of XAI. The second part defines the novel approach of social eXplainable AI (sXAI) along the three characteristics of social interaction such as patternedness, incrementality, and multimodality. Finally, the third part explains the structure followed by each chapter. The book offers insights not only for readers who work on technology development but also for those working in sociotechnical fields. Addressing an interdisciplinary readership, the book is an invitation for more exchange and further development of the sXAI field.</jats:p>","lang":"eng"}],"status":"public","type":"book_editor","title":"Social Explainable AI","main_file_link":[{"open_access":"1","url":"https://link.springer.com/book/10.1007/978-981-96-5290-7"}],"doi":"10.1007/978-981-96-5290-7_1","publisher":"Springer Nature Singapore","oa":"1","date_updated":"2026-03-19T11:59:42Z","date_created":"2026-03-19T11:55:17Z","place":"Singapore","year":"2026","citation":{"apa":"Rohlfing, K. J., Främling, K., Lim, B., Alpsancar, S., &#38; Thommes, K. (Eds.). (2026). <i>Social Explainable AI</i>. Springer Nature Singapore. <a href=\"https://doi.org/10.1007/978-981-96-5290-7_1\">https://doi.org/10.1007/978-981-96-5290-7_1</a>","short":"K.J. Rohlfing, K. Främling, B. Lim, S. Alpsancar, K. Thommes, eds., Social Explainable AI, Springer Nature Singapore, Singapore, 2026.","bibtex":"@book{Rohlfing_Främling_Lim_Alpsancar_Thommes_2026, place={Singapore}, title={Social Explainable AI}, DOI={<a href=\"https://doi.org/10.1007/978-981-96-5290-7_1\">10.1007/978-981-96-5290-7_1</a>}, publisher={Springer Nature Singapore}, year={2026} }","mla":"Rohlfing, Katharina J., et al., editors. <i>Social Explainable AI</i>. 
Springer Nature Singapore, 2026, doi:<a href=\"https://doi.org/10.1007/978-981-96-5290-7_1\">10.1007/978-981-96-5290-7_1</a>.","ieee":"K. J. Rohlfing, K. Främling, B. Lim, S. Alpsancar, and K. Thommes, Eds., <i>Social Explainable AI</i>. Singapore: Springer Nature Singapore, 2026.","chicago":"Rohlfing, Katharina J., Kary Främling, Brian Lim, Suzana Alpsancar, and Kirsten Thommes, eds. <i>Social Explainable AI</i>. Singapore: Springer Nature Singapore, 2026. <a href=\"https://doi.org/10.1007/978-981-96-5290-7_1\">https://doi.org/10.1007/978-981-96-5290-7_1</a>.","ama":"Rohlfing KJ, Främling K, Lim B, Alpsancar S, Thommes K, eds. <i>Social Explainable AI</i>. Springer Nature Singapore; 2026. doi:<a href=\"https://doi.org/10.1007/978-981-96-5290-7_1\">10.1007/978-981-96-5290-7_1</a>"},"publication_status":"published","publication_identifier":{"isbn":["9789819652891","9789819652907"]}},{"oa":"1","publisher":"Springer","date_updated":"2026-03-20T09:22:08Z","author":[{"last_name":"Främling","full_name":"Främling, Kary","first_name":"Kary"},{"first_name":"Rachid","full_name":"Alami, Rachid","last_name":"Alami"},{"first_name":"Joris","full_name":"Hulstijn, Joris","last_name":"Hulstijn"},{"first_name":"Igor","last_name":"Tchappi","full_name":"Tchappi, Igor"},{"first_name":"Angela","id":"57578","full_name":"Grimminger, Angela","last_name":"Grimminger"},{"first_name":"Britta","last_name":"Wrede","full_name":"Wrede, Britta"},{"full_name":"Buschmeier, Hendrik","last_name":"Buschmeier","first_name":"Hendrik"},{"full_name":"Kubler, Sylvain","last_name":"Kubler","first_name":"Sylvain"}],"date_created":"2026-03-20T09:18:01Z","title":"Scenarios of Social Explainable AI in practice","doi":"10.1007/978-981-96-5290-7_2","main_file_link":[{"open_access":"1"}],"publication_identifier":{"isbn":["9789819652891","9789819652907"]},"quality_controlled":"1","publication_status":"published","related_material":{"link":[{"url":"https://link.springer.com/chapter/10.1007/978-981-96-5290-7_2","relation":"original"}]},"year":"2026","page":"19-38","citation":{"bibtex":"@inbook{Främling_Alami_Hulstijn_Tchappi_Grimminger_Wrede_Buschmeier_Kubler_2026, title={Scenarios of Social Explainable AI in practice}, DOI={<a href=\"https://doi.org/10.1007/978-981-96-5290-7_2\">10.1007/978-981-96-5290-7_2</a>}, booktitle={Social Explainable AI}, publisher={Springer}, author={Främling, Kary and Alami, Rachid and Hulstijn, Joris and Tchappi, Igor and Grimminger, Angela and Wrede, Britta and Buschmeier, Hendrik and Kubler, Sylvain}, editor={Rohlfing, Katharina J. and Främling, Kary and Alpsancar, Suzana and Thommes, Kirsten and Lim, Brian Y.}, year={2026}, pages={19–38} }","mla":"Främling, Kary, et al. “Scenarios of Social Explainable AI in Practice.” <i>Social Explainable AI</i>, edited by Katharina J. Rohlfing et al., Springer, 2026, pp. 19–38, doi:<a href=\"https://doi.org/10.1007/978-981-96-5290-7_2\">10.1007/978-981-96-5290-7_2</a>.","short":"K. Främling, R. Alami, J. Hulstijn, I. Tchappi, A. Grimminger, B. Wrede, H. Buschmeier, S. Kubler, in: K.J. Rohlfing, K. Främling, S. Alpsancar, K. Thommes, B.Y. Lim (Eds.), Social Explainable AI, Springer, 2026, pp. 19–38.","apa":"Främling, K., Alami, R., Hulstijn, J., Tchappi, I., Grimminger, A., Wrede, B., Buschmeier, H., &#38; Kubler, S. (2026). Scenarios of Social Explainable AI in practice. In K. J. Rohlfing, K. Främling, S. Alpsancar, K. Thommes, &#38; B. Y. Lim (Eds.), <i>Social Explainable AI</i> (pp. 19–38). Springer. 
<a href=\"https://doi.org/10.1007/978-981-96-5290-7_2\">https://doi.org/10.1007/978-981-96-5290-7_2</a>","ama":"Främling K, Alami R, Hulstijn J, et al. Scenarios of Social Explainable AI in practice. In: Rohlfing KJ, Främling K, Alpsancar S, Thommes K, Lim BY, eds. <i>Social Explainable AI</i>. Springer; 2026:19-38. doi:<a href=\"https://doi.org/10.1007/978-981-96-5290-7_2\">10.1007/978-981-96-5290-7_2</a>","chicago":"Främling, Kary, Rachid Alami, Joris Hulstijn, Igor Tchappi, Angela Grimminger, Britta Wrede, Hendrik Buschmeier, and Sylvain Kubler. “Scenarios of Social Explainable AI in Practice.” In <i>Social Explainable AI</i>, edited by Katharina J. Rohlfing, Kary Främling, Suzana Alpsancar, Kirsten Thommes, and Brian Y. Lim, 19–38. Springer, 2026. <a href=\"https://doi.org/10.1007/978-981-96-5290-7_2\">https://doi.org/10.1007/978-981-96-5290-7_2</a>.","ieee":"K. Främling <i>et al.</i>, “Scenarios of Social Explainable AI in practice,” in <i>Social Explainable AI</i>, K. J. Rohlfing, K. Främling, S. Alpsancar, K. Thommes, and B. Y. Lim, Eds. Springer, 2026, pp. 19–38."},"_id":"65069","user_id":"57578","language":[{"iso":"eng"}],"publication":"Social Explainable AI","type":"book_chapter","editor":[{"first_name":"Katharina J.","last_name":"Rohlfing","full_name":"Rohlfing, Katharina J."},{"first_name":"Kary","full_name":"Främling, Kary","last_name":"Främling"},{"full_name":"Alpsancar, Suzana","last_name":"Alpsancar","first_name":"Suzana"},{"full_name":"Thommes, Kirsten","last_name":"Thommes","first_name":"Kirsten"},{"full_name":"Lim, Brian Y.","last_name":"Lim","first_name":"Brian Y."}],"status":"public"},{"project":[{"_id":"113","name":"TRR 318 - Subproject A3"},{"name":"TRR 318 - Subproject C2","_id":"125"}],"_id":"65090","user_id":"72497","department":[{"_id":"178"},{"_id":"184"}],"language":[{"iso":"eng"}],"type":"book_chapter","publication":"Social Explainable AI","abstract":[{"text":"<jats:title>Abstract</jats:title>\r\n                  <jats:p>If XAI are to become social XAI, XAI methods must have capabilities enabling them to ‘extract’ information about the underlying AI model and to generate explanatory content based on that information. In a dialog between explainer and explainee, the explanans presented in every explanation move have to relate to each other understandably and coherently in order to remain trustworthy. This signifies that the generated explanantia have to be consistent—independently of what question is answered by each explanans, in what modality, in what vocabulary, and at what level of abstraction. Moreover, it is advantageous to be able to provide a rich palette of different kinds of explanantia in order to be able to have a fluent dialog in which the explanantia can be generated and adapted to the context, the explainee, feedback, reactions during the interaction with the explainee, and so forth. This chapter attempts to identify relevant questions that an explainee might ask during an explanatory dialog, and it assesses to what extent different XAI methods are capable of addressing these questions in a coherent way. The Contextual Importance and Utility (CIU) method is used to illustrate how an XAI method can generate explanantia for most of the identified questions. 
CIU also provides a flexibility in how explanatory content is generated that makes it possible to create a meaningful dialog with the explainee.</jats:p>","lang":"eng"}],"status":"public","date_updated":"2026-03-23T08:49:08Z","publisher":"Springer Nature Singapore","author":[{"first_name":"Kary","last_name":"Främling","full_name":"Främling, Kary"},{"first_name":"Kirsten","full_name":"Thommes, Kirsten","id":"72497","last_name":"Thommes"},{"first_name":"Britta","full_name":"Wrede, Britta","last_name":"Wrede"}],"date_created":"2026-03-23T08:48:38Z","title":"Generation of Explanatory Content and Requirements for Social XAI","doi":"10.1007/978-981-96-5290-7_15","publication_status":"published","publication_identifier":{"isbn":["9789819652891","9789819652907"]},"place":"Singapore","year":"2026","citation":{"ama":"Främling K, Thommes K, Wrede B. Generation of Explanatory Content and Requirements for Social XAI. In: <i>Social Explainable AI</i>. Springer Nature Singapore; 2026. doi:<a href=\"https://doi.org/10.1007/978-981-96-5290-7_15\">10.1007/978-981-96-5290-7_15</a>","ieee":"K. Främling, K. Thommes, and B. Wrede, “Generation of Explanatory Content and Requirements for Social XAI,” in <i>Social Explainable AI</i>, Singapore: Springer Nature Singapore, 2026.","chicago":"Främling, Kary, Kirsten Thommes, and Britta Wrede. “Generation of Explanatory Content and Requirements for Social XAI.” In <i>Social Explainable AI</i>. Singapore: Springer Nature Singapore, 2026. <a href=\"https://doi.org/10.1007/978-981-96-5290-7_15\">https://doi.org/10.1007/978-981-96-5290-7_15</a>.","apa":"Främling, K., Thommes, K., &#38; Wrede, B. (2026). Generation of Explanatory Content and Requirements for Social XAI. In <i>Social Explainable AI</i>. Springer Nature Singapore. <a href=\"https://doi.org/10.1007/978-981-96-5290-7_15\">https://doi.org/10.1007/978-981-96-5290-7_15</a>","short":"K. Främling, K. Thommes, B. Wrede, in: Social Explainable AI, Springer Nature Singapore, Singapore, 2026.","mla":"Främling, Kary, et al. “Generation of Explanatory Content and Requirements for Social XAI.” <i>Social Explainable AI</i>, Springer Nature Singapore, 2026, doi:<a href=\"https://doi.org/10.1007/978-981-96-5290-7_15\">10.1007/978-981-96-5290-7_15</a>.","bibtex":"@inbook{Främling_Thommes_Wrede_2026, place={Singapore}, title={Generation of Explanatory Content and Requirements for Social XAI}, DOI={<a href=\"https://doi.org/10.1007/978-981-96-5290-7_15\">10.1007/978-981-96-5290-7_15</a>}, booktitle={Social Explainable AI}, publisher={Springer Nature Singapore}, author={Främling, Kary and Thommes, Kirsten and Wrede, Britta}, year={2026} }"}},{"title":"Measuring the Outcome of sXAI","doi":"10.1007/978-981-96-5290-7_28","date_updated":"2026-03-23T08:45:33Z","publisher":"Springer Nature Singapore","date_created":"2026-03-23T08:45:05Z","author":[{"first_name":"Kirsten","last_name":"Thommes","id":"72497","full_name":"Thommes, Kirsten"}],"year":"2026","place":"Singapore","citation":{"ama":"Thommes K. Measuring the Outcome of sXAI. In: <i>Social Explainable AI</i>. Springer Nature Singapore; 2026. doi:<a href=\"https://doi.org/10.1007/978-981-96-5290-7_28\">10.1007/978-981-96-5290-7_28</a>","apa":"Thommes, K. (2026). Measuring the Outcome of sXAI. In <i>Social Explainable AI</i>. Springer Nature Singapore. <a href=\"https://doi.org/10.1007/978-981-96-5290-7_28\">https://doi.org/10.1007/978-981-96-5290-7_28</a>","short":"K. 
Thommes, in: Social Explainable AI, Springer Nature Singapore, Singapore, 2026.","bibtex":"@inbook{Thommes_2026, place={Singapore}, title={Measuring the Outcome of sXAI}, DOI={<a href=\"https://doi.org/10.1007/978-981-96-5290-7_28\">10.1007/978-981-96-5290-7_28</a>}, booktitle={Social Explainable AI}, publisher={Springer Nature Singapore}, author={Thommes, Kirsten}, year={2026} }","mla":"Thommes, Kirsten. “Measuring the Outcome of SXAI.” <i>Social Explainable AI</i>, Springer Nature Singapore, 2026, doi:<a href=\"https://doi.org/10.1007/978-981-96-5290-7_28\">10.1007/978-981-96-5290-7_28</a>.","ieee":"K. Thommes, “Measuring the Outcome of sXAI,” in <i>Social Explainable AI</i>, Singapore: Springer Nature Singapore, 2026.","chicago":"Thommes, Kirsten. “Measuring the Outcome of SXAI.” In <i>Social Explainable AI</i>. Singapore: Springer Nature Singapore, 2026. <a href=\"https://doi.org/10.1007/978-981-96-5290-7_28\">https://doi.org/10.1007/978-981-96-5290-7_28</a>."},"publication_status":"published","publication_identifier":{"isbn":["9789819652891","9789819652907"]},"language":[{"iso":"eng"}],"project":[{"name":"TRR 318 - Subproject A3","_id":"113"},{"_id":"125","name":"TRR 318 - Subproject C2"}],"_id":"65088","user_id":"72497","department":[{"_id":"178"},{"_id":"184"}],"abstract":[{"text":"<jats:title>Abstract</jats:title>\r\n                  <jats:p>Quantitatively evaluating the benefits of eXplainable Artificial Intelligence (XAI) and social XAI for humans is not a trivial pursuit. Therefore, we categorize the potential measures in terms of subjective and objective outcomes and short- and long-term outcomes of interactive social XAI. When reviewing the current state of the art, we observed some measurement problems in the literature: (a) Researchers do not clearly state whether they want to measure the inner state of users, users’ behavioral response, or the overall AI-human collaborative performance. (b) Moreover, most measures implicitly assume that all humans either do not react or improve in attitudes or performance. Psychological reactance (feeling or doing the opposite) is usually not captured. (c) Many researchers invent their own scale when measuring psychological constructs, thereby jeopardizing the validity of their measures and slowing down progress in the field, because general evidence and subsequent learning can be achieved only by collecting many compatible pieces of evidence. (d) Most studies look into short-term outcomes and neglect that experiences in social interactions with XAI may evolve and have long-term outcomes not only for the individual but also for groups or society at large.</jats:p>","lang":"eng"}],"status":"public","type":"book_chapter","publication":"Social Explainable AI"},{"type":"book_chapter","publication":"Social Explainable AI","status":"public","abstract":[{"text":"<jats:title>Abstract</jats:title>\r\n                  <jats:p>Explainable AI (XAI) aims to make the decisions and behavior of an AI understandable to the people interacting with it and to those affected by its outcomes. To make XAI social, real-world XAI systems need to simulate not only the ways in which human explainers behave within explanatory dialogs but also the ways in which such dialogs can successfully achieve the intended understanding on the explainee’s side. This, in turn, requires an operationalization of the three core aspects of social XAI: multimodality, incrementality, and patterns. 
This chapter lays the ground for this goal by defining a basic operational model of social interactions that can be refined and extended to account for the specificities of any explanatory real-world setting. This serves as a basis for summarizing and discussing existing ideas from explainability research and related areas in order to operationalize each core aspect. Selected examples and case studies illustrate how to concretely realize such an operationalization, thereby serving as a starting point for future research on social interaction with XAI.</jats:p>","lang":"eng"}],"user_id":"72497","department":[{"_id":"178"},{"_id":"184"}],"project":[{"name":"TRR 318 - Subproject A3","_id":"113"},{"name":"TRR 318 - Subproject C2","_id":"125"}],"_id":"65086","language":[{"iso":"eng"}],"publication_status":"published","publication_identifier":{"isbn":["9789819652891","9789819652907"]},"citation":{"ama":"Wachsmuth H, Thommes K, Alshomary M. Operationalizing Social Interaction. In: <i>Social Explainable AI</i>. Springer Nature Singapore; 2026. doi:<a href=\"https://doi.org/10.1007/978-981-96-5290-7_27\">10.1007/978-981-96-5290-7_27</a>","ieee":"H. Wachsmuth, K. Thommes, and M. Alshomary, “Operationalizing Social Interaction,” in <i>Social Explainable AI</i>, Singapore: Springer Nature Singapore, 2026.","chicago":"Wachsmuth, Henning, Kirsten Thommes, and Milad Alshomary. “Operationalizing Social Interaction.” In <i>Social Explainable AI</i>. Singapore: Springer Nature Singapore, 2026. <a href=\"https://doi.org/10.1007/978-981-96-5290-7_27\">https://doi.org/10.1007/978-981-96-5290-7_27</a>.","short":"H. Wachsmuth, K. Thommes, M. Alshomary, in: Social Explainable AI, Springer Nature Singapore, Singapore, 2026.","mla":"Wachsmuth, Henning, et al. “Operationalizing Social Interaction.” <i>Social Explainable AI</i>, Springer Nature Singapore, 2026, doi:<a href=\"https://doi.org/10.1007/978-981-96-5290-7_27\">10.1007/978-981-96-5290-7_27</a>.","bibtex":"@inbook{Wachsmuth_Thommes_Alshomary_2026, place={Singapore}, title={Operationalizing Social Interaction}, DOI={<a href=\"https://doi.org/10.1007/978-981-96-5290-7_27\">10.1007/978-981-96-5290-7_27</a>}, booktitle={Social Explainable AI}, publisher={Springer Nature Singapore}, author={Wachsmuth, Henning and Thommes, Kirsten and Alshomary, Milad}, year={2026} }","apa":"Wachsmuth, H., Thommes, K., &#38; Alshomary, M. (2026). Operationalizing Social Interaction. In <i>Social Explainable AI</i>. Springer Nature Singapore. 
<a href=\"https://doi.org/10.1007/978-981-96-5290-7_27\">https://doi.org/10.1007/978-981-96-5290-7_27</a>"},"place":"Singapore","year":"2026","date_created":"2026-03-23T08:42:37Z","author":[{"full_name":"Wachsmuth, Henning","last_name":"Wachsmuth","first_name":"Henning"},{"id":"72497","full_name":"Thommes, Kirsten","last_name":"Thommes","first_name":"Kirsten"},{"first_name":"Milad","last_name":"Alshomary","full_name":"Alshomary, Milad"}],"date_updated":"2026-03-23T08:43:25Z","publisher":"Springer Nature Singapore","doi":"10.1007/978-981-96-5290-7_27","title":"Operationalizing Social Interaction"},{"user_id":"72497","department":[{"_id":"178"},{"_id":"184"}],"project":[{"name":"TRR 318 - Subproject A3","_id":"113"},{"_id":"125","name":"TRR 318 - Subproject C2"}],"_id":"65091","language":[{"iso":"eng"}],"type":"book_chapter","publication":"Social Explainable AI","status":"public","abstract":[{"text":"<jats:title>Abstract</jats:title>\r\n                  <jats:p>This chapter examines key challenges and potential improvements in the areas of user interaction and dynamic explanations. It highlights the need for XAI systems to address context factors beyond their predefined scope, it points to the potential need to cocreate new concepts that are adapted to particular explainees, and it provides a clear overview of the XAI system’s underlying knowledge structure and interaction steps. Emphasis is placed on mixed-initiative interaction in which the system can lead or respond based on the context and the explainee’s reactions while asserting the importance of maintaining coherence across consecutive explanations. These advances aim to make XAI systems more flexible, interactive, and user-centric. An operationalization section outlines how such social XAI systems could be implemented based on the XAI capabilities provided by the Contextual Importance and Utility XAI method described in the previous chapter.</jats:p>","lang":"eng"}],"author":[{"first_name":"Kary","last_name":"Främling","full_name":"Främling, Kary"},{"last_name":"Wrede","full_name":"Wrede, Britta","first_name":"Britta"},{"first_name":"Kirsten","last_name":"Thommes","id":"72497","full_name":"Thommes, Kirsten"}],"date_created":"2026-03-23T08:52:18Z","date_updated":"2026-03-23T08:52:46Z","publisher":"Springer Nature Singapore","doi":"10.1007/978-981-96-5290-7_16","title":"Exploration of Explaining Content","publication_status":"published","publication_identifier":{"isbn":["9789819652891","9789819652907"]},"citation":{"chicago":"Främling, Kary, Britta Wrede, and Kirsten Thommes. “Exploration of Explaining Content.” In <i>Social Explainable AI</i>. Singapore: Springer Nature Singapore, 2026. <a href=\"https://doi.org/10.1007/978-981-96-5290-7_16\">https://doi.org/10.1007/978-981-96-5290-7_16</a>.","ieee":"K. Främling, B. Wrede, and K. Thommes, “Exploration of Explaining Content,” in <i>Social Explainable AI</i>, Singapore: Springer Nature Singapore, 2026.","ama":"Främling K, Wrede B, Thommes K. Exploration of Explaining Content. In: <i>Social Explainable AI</i>. Springer Nature Singapore; 2026. doi:<a href=\"https://doi.org/10.1007/978-981-96-5290-7_16\">10.1007/978-981-96-5290-7_16</a>","apa":"Främling, K., Wrede, B., &#38; Thommes, K. (2026). Exploration of Explaining Content. In <i>Social Explainable AI</i>. Springer Nature Singapore. 
<a href=\"https://doi.org/10.1007/978-981-96-5290-7_16\">https://doi.org/10.1007/978-981-96-5290-7_16</a>","bibtex":"@inbook{Främling_Wrede_Thommes_2026, place={Singapore}, title={Exploration of Explaining Content}, DOI={<a href=\"https://doi.org/10.1007/978-981-96-5290-7_16\">10.1007/978-981-96-5290-7_16</a>}, booktitle={Social Explainable AI}, publisher={Springer Nature Singapore}, author={Främling, Kary and Wrede, Britta and Thommes, Kirsten}, year={2026} }","mla":"Främling, Kary, et al. “Exploration of Explaining Content.” <i>Social Explainable AI</i>, Springer Nature Singapore, 2026, doi:<a href=\"https://doi.org/10.1007/978-981-96-5290-7_16\">10.1007/978-981-96-5290-7_16</a>.","short":"K. Främling, B. Wrede, K. Thommes, in: Social Explainable AI, Springer Nature Singapore, Singapore, 2026."},"place":"Singapore","year":"2026"},{"publisher":"Springer Nature Singapore","date_updated":"2026-03-23T08:44:37Z","date_created":"2026-03-23T08:43:50Z","author":[{"id":"72497","full_name":"Thommes, Kirsten","last_name":"Thommes","first_name":"Kirsten"},{"first_name":"Kary","last_name":"Främling","full_name":"Främling, Kary"},{"full_name":"Wrede, Britta","last_name":"Wrede","first_name":"Britta"},{"first_name":"Sylvain","full_name":"Kubler, Sylvain","last_name":"Kubler"}],"title":"Interaction History in Social XAI","doi":"10.1007/978-981-96-5290-7_17","publication_status":"published","publication_identifier":{"isbn":["9789819652891","9789819652907"]},"place":"Singapore","year":"2026","citation":{"ama":"Thommes K, Främling K, Wrede B, Kubler S. Interaction History in Social XAI. In: <i>Social Explainable AI</i>. Springer Nature Singapore; 2026. doi:<a href=\"https://doi.org/10.1007/978-981-96-5290-7_17\">10.1007/978-981-96-5290-7_17</a>","ieee":"K. Thommes, K. Främling, B. Wrede, and S. Kubler, “Interaction History in Social XAI,” in <i>Social Explainable AI</i>, Singapore: Springer Nature Singapore, 2026.","chicago":"Thommes, Kirsten, Kary Främling, Britta Wrede, and Sylvain Kubler. “Interaction History in Social XAI.” In <i>Social Explainable AI</i>. Singapore: Springer Nature Singapore, 2026. <a href=\"https://doi.org/10.1007/978-981-96-5290-7_17\">https://doi.org/10.1007/978-981-96-5290-7_17</a>.","short":"K. Thommes, K. Främling, B. Wrede, S. Kubler, in: Social Explainable AI, Springer Nature Singapore, Singapore, 2026.","bibtex":"@inbook{Thommes_Främling_Wrede_Kubler_2026, place={Singapore}, title={Interaction History in Social XAI}, DOI={<a href=\"https://doi.org/10.1007/978-981-96-5290-7_17\">10.1007/978-981-96-5290-7_17</a>}, booktitle={Social Explainable AI}, publisher={Springer Nature Singapore}, author={Thommes, Kirsten and Främling, Kary and Wrede, Britta and Kubler, Sylvain}, year={2026} }","mla":"Thommes, Kirsten, et al. “Interaction History in Social XAI.” <i>Social Explainable AI</i>, Springer Nature Singapore, 2026, doi:<a href=\"https://doi.org/10.1007/978-981-96-5290-7_17\">10.1007/978-981-96-5290-7_17</a>.","apa":"Thommes, K., Främling, K., Wrede, B., &#38; Kubler, S. (2026). Interaction History in Social XAI. In <i>Social Explainable AI</i>. Springer Nature Singapore. 
<a href=\"https://doi.org/10.1007/978-981-96-5290-7_17\">https://doi.org/10.1007/978-981-96-5290-7_17</a>"},"project":[{"_id":"125","name":"TRR 318 - Subproject C2"},{"name":"TRR 318 - Subproject A3","_id":"113"}],"_id":"65087","user_id":"72497","department":[{"_id":"178"},{"_id":"184"}],"language":[{"iso":"eng"}],"type":"book_chapter","publication":"Social Explainable AI","abstract":[{"text":"<jats:title>Abstract</jats:title>\r\n                  <jats:p>Much research in XAI focuses on single, one-shot interactions, implicitly assuming that interactions have no past, no future, and no surroundings. Although this assumption may be necessary for many empirical research settings, it is overly simplifying and unrealistic. Whereas empirical research focuses on a world in which no social context exists, real applications are embedded in a temporal (past and future) and social context. Social science research shows that repeated interactions and secondhand knowledge in the social space massively affect human attitudes and behaviors. This chapter explains how not only repeated interactions between XAI and humans but also the social space and secondhand information may affect social XAI research.</jats:p>","lang":"eng"}],"status":"public"},{"type":"book_chapter","publication":"Social Explainable AI","abstract":[{"lang":"eng","text":"<jats:title>Abstract</jats:title>\r\n                  <jats:p>In the past, there has been much research aiming to evaluate XAI practices—that is, explanations that can add to a user’s understanding of “why” or “why not.” However, because there is such a huge amount of diversity in social contexts, optimizing for the mean neglects the social dimensions of to whom, what, why, when, and where explanations are provided. Nonetheless, these dimensions matter. We give some brief examples on the accuracy of the mental model (as an example for who?), on measuring explanation practices (as an example of what?), on human motivation (as an example of why?), on repeated interactions (as an example of when), and on bystander effects (as an example of where?). Importantly, controlling for these factors (or randomizing them) is as important as attempting to perform external validations.</jats:p>"}],"status":"public","project":[{"_id":"113","name":"TRR 318 - Subproject A3"},{"name":"TRR 318 - Subproject C2","_id":"125"}],"_id":"65089","user_id":"72497","department":[{"_id":"178"},{"_id":"184"}],"language":[{"iso":"eng"}],"publication_status":"published","publication_identifier":{"isbn":["9789819652891","9789819652907"]},"place":"Singapore","year":"2026","citation":{"chicago":"Thommes, Kirsten. “Evaluation Principles.” In <i>Social Explainable AI</i>. Singapore: Springer Nature Singapore, 2026. <a href=\"https://doi.org/10.1007/978-981-96-5290-7_26\">https://doi.org/10.1007/978-981-96-5290-7_26</a>.","ieee":"K. Thommes, “Evaluation Principles,” in <i>Social Explainable AI</i>, Singapore: Springer Nature Singapore, 2026.","ama":"Thommes K. Evaluation Principles. In: <i>Social Explainable AI</i>. Springer Nature Singapore; 2026. doi:<a href=\"https://doi.org/10.1007/978-981-96-5290-7_26\">10.1007/978-981-96-5290-7_26</a>","short":"K. Thommes, in: Social Explainable AI, Springer Nature Singapore, Singapore, 2026.","mla":"Thommes, Kirsten. 
“Evaluation Principles.” <i>Social Explainable AI</i>, Springer Nature Singapore, 2026, doi:<a href=\"https://doi.org/10.1007/978-981-96-5290-7_26\">10.1007/978-981-96-5290-7_26</a>.","bibtex":"@inbook{Thommes_2026, place={Singapore}, title={Evaluation Principles}, DOI={<a href=\"https://doi.org/10.1007/978-981-96-5290-7_26\">10.1007/978-981-96-5290-7_26</a>}, booktitle={Social Explainable AI}, publisher={Springer Nature Singapore}, author={Thommes, Kirsten}, year={2026} }","apa":"Thommes, K. (2026). Evaluation Principles. In <i>Social Explainable AI</i>. Springer Nature Singapore. <a href=\"https://doi.org/10.1007/978-981-96-5290-7_26\">https://doi.org/10.1007/978-981-96-5290-7_26</a>"},"date_updated":"2026-03-23T08:47:38Z","publisher":"Springer Nature Singapore","date_created":"2026-03-23T08:47:10Z","author":[{"last_name":"Thommes","full_name":"Thommes, Kirsten","id":"72497","first_name":"Kirsten"}],"title":"Evaluation Principles","doi":"10.1007/978-981-96-5290-7_26"}]
