@article{63401,
  author       = {{Thommes, Kirsten and Mehic, Miro}},
  issn         = {{0965-8564}},
  journal      = {{Transportation Research Part A: Policy and Practice}},
  publisher    = {{Elsevier BV}},
  title        = {{{The persistence of default effects: Evidence from CO2 offsetting in cargo transportation}}},
  doi          = {{10.1016/j.tra.2025.104838}},
  volume       = {{204}},
  year         = {{2026}},
}

@book{65065,
  abstract     = {{This introduction sets the stage for the present book. Whereas research in eXplainable AI (XAI) is motivated by societal changes and values, technology development largely ignores social aspects. This book aims to address this research gap with a systematic and comprehensive social view on explainable AI. Besides introducing many relevant concepts, the book offers a first look at their possible implementation, thus advancing the development of more social XAI. The introduction starts by connecting the topic to the general research field of XAI. The second part defines the novel approach of social eXplainable AI (sXAI) along three characteristics of social interaction: patternedness, incrementality, and multimodality. Finally, the third part explains the structure followed by each chapter. The book offers insights not only for readers who work on technology development but also for those working in sociotechnical fields. Addressing an interdisciplinary readership, the book is an invitation for more exchange and further development of the sXAI field.}},
  editor       = {{Rohlfing, Katharina J. and Främling, Kary and Lim, Brian and Alpsancar, Suzana and Thommes, Kirsten}},
  isbn         = {{9789819652891}},
  publisher    = {{Springer Nature Singapore}},
  title        = {{{Social Explainable AI}}},
  doi          = {{10.1007/978-981-96-5290-7_1}},
  year         = {{2026}},
}

@inbook{65090,
  abstract     = {{If XAI systems are to become social XAI, XAI methods must have capabilities enabling them to ‘extract’ information about the underlying AI model and to generate explanatory content based on that information. In a dialog between explainer and explainee, the explanantia presented in successive explanation moves have to relate to each other understandably and coherently in order to remain trustworthy. This signifies that the generated explanantia have to be consistent, independently of what question is answered by each explanans, in what modality, in what vocabulary, and at what level of abstraction. Moreover, it is advantageous to be able to provide a rich palette of different kinds of explanantia in order to sustain a fluent dialog in which the explanantia can be generated and adapted to the context, the explainee, feedback, reactions during the interaction, and so forth. This chapter attempts to identify relevant questions that an explainee might ask during an explanatory dialog, and it assesses to what extent different XAI methods are capable of addressing these questions in a coherent way. The Contextual Importance and Utility (CIU) method is used to illustrate how an XAI method can generate explanantia for most of the identified questions. CIU also provides flexibility in how explanatory content is generated, making it possible to create a meaningful dialog with the explainee.}},
  author       = {{Främling, Kary and Thommes, Kirsten and Wrede, Britta}},
  booktitle    = {{Social Explainable AI}},
  isbn         = {{9789819652891}},
  publisher    = {{Springer Nature Singapore}},
  title        = {{{Generation of Explanatory Content and Requirements for Social XAI}}},
  doi          = {{10.1007/978-981-96-5290-7_15}},
  year         = {{2026}},
}

@inbook{65088,
  abstract     = {{Quantitatively evaluating the benefits of eXplainable Artificial Intelligence (XAI) and social XAI for humans is not a trivial pursuit. Therefore, we categorize the potential measures in terms of subjective and objective outcomes and short- and long-term outcomes of interactive social XAI. When reviewing the current state of the art, we observed some measurement problems in the literature: (a) Researchers do not clearly state whether they want to measure the inner state of users, users’ behavioral response, or the overall AI-human collaborative performance. (b) Moreover, most measures implicitly assume that all humans either do not react or improve in attitudes or performance. Psychological reactance (feeling or doing the opposite) is usually not captured. (c) Many researchers invent their own scale when measuring psychological constructs, thereby jeopardizing the validity of their measures and slowing down progress in the field, because general evidence and subsequent learning can be achieved only by collecting many compatible pieces of evidence. (d) Most studies look into short-term outcomes and neglect that experiences in social interactions with XAI may evolve and have long-term outcomes not only for the individual but also for groups or society at large.}},
  author       = {{Thommes, Kirsten}},
  booktitle    = {{Social Explainable AI}},
  isbn         = {{9789819652891}},
  publisher    = {{Springer Nature Singapore}},
  title        = {{{Measuring the Outcome of sXAI}}},
  doi          = {{10.1007/978-981-96-5290-7_28}},
  year         = {{2026}},
}

@inbook{65086,
  abstract     = {{Explainable AI (XAI) aims to make the decisions and behavior of an AI understandable to the people interacting with it and to those affected by its outcomes. To make XAI social, real-world XAI systems need to simulate not only the ways in which human explainers behave within explanatory dialogs but also the ways in which such dialogs can successfully achieve the intended understanding on the explainee’s side. This, in turn, requires an operationalization of the three core aspects of social XAI: multimodality, incrementality, and patterns. This chapter lays the ground for this goal by defining a basic operational model of social interactions that can be refined and extended to account for the specificities of any explanatory real-world setting. This serves as a basis for summarizing and discussing existing ideas from explainability research and related areas in order to operationalize each core aspect. Selected examples and case studies illustrate how to concretely realize such an operationalization, thereby serving as a starting point for future research on social interaction with XAI.}},
  author       = {{Wachsmuth, Henning and Thommes, Kirsten and Alshomary, Milad}},
  booktitle    = {{Social Explainable AI}},
  isbn         = {{9789819652891}},
  publisher    = {{Springer Nature Singapore}},
  title        = {{{Operationalizing Social Interaction}}},
  doi          = {{10.1007/978-981-96-5290-7_27}},
  year         = {{2026}},
}

@inbook{65091,
  abstract     = {{This chapter examines key challenges and potential improvements in the areas of user interaction and dynamic explanations. It highlights the need for XAI systems to address context factors beyond their predefined scope, it points to the potential need to cocreate new concepts that are adapted to particular explainees, and it provides a clear overview of the XAI system’s underlying knowledge structure and interaction steps. Emphasis is placed on mixed-initiative interaction in which the system can lead or respond based on the context and the explainee’s reactions while asserting the importance of maintaining coherence across consecutive explanations. These advances aim to make XAI systems more flexible, interactive, and user-centric. An operationalization section outlines how such social XAI systems could be implemented based on the XAI capabilities provided by the Contextual Importance and Utility XAI method described in the previous chapter.}},
  author       = {{Främling, Kary and Wrede, Britta and Thommes, Kirsten}},
  booktitle    = {{Social Explainable AI}},
  isbn         = {{9789819652891}},
  publisher    = {{Springer Nature Singapore}},
  title        = {{{Exploration of Explaining Content}}},
  doi          = {{10.1007/978-981-96-5290-7_16}},
  year         = {{2026}},
}

@inbook{65087,
  abstract     = {{Much research in XAI focuses on single, one-shot interactions, implicitly assuming that interactions have no past, no future, and no surroundings. Although this assumption may be necessary for many empirical research settings, it oversimplifies and is unrealistic. Whereas empirical research focuses on a world in which no social context exists, real applications are embedded in a temporal (past and future) and social context. Social science research shows that repeated interactions and secondhand knowledge in the social space massively affect human attitudes and behaviors. This chapter explains how not only repeated interactions between XAI and humans but also the social space and secondhand information may affect social XAI research.}},
  author       = {{Thommes, Kirsten and Främling, Kary and Wrede, Britta and Kubler, Sylvain}},
  booktitle    = {{Social Explainable AI}},
  isbn         = {{9789819652891}},
  publisher    = {{Springer Nature Singapore}},
  title        = {{{Interaction History in Social XAI}}},
  doi          = {{10.1007/978-981-96-5290-7_17}},
  year         = {{2026}},
}

@inbook{65089,
  abstract     = {{In the past, there has been much research aiming to evaluate XAI practices, that is, explanations that can add to a user’s understanding of “why” or “why not.” However, because there is such a huge amount of diversity in social contexts, optimizing for the mean neglects the social dimensions of to whom, what, why, when, and where explanations are provided. Nonetheless, these dimensions matter. We give some brief examples on the accuracy of the mental model (as an example of who?), on measuring explanation practices (as an example of what?), on human motivation (as an example of why?), on repeated interactions (as an example of when?), and on bystander effects (as an example of where?). Importantly, controlling for these factors (or randomizing them) is as important as attempting to perform external validations.}},
  author       = {{Thommes, Kirsten}},
  booktitle    = {{Social Explainable AI}},
  isbn         = {{9789819652891}},
  publisher    = {{Springer Nature Singapore}},
  title        = {{{Evaluation Principles}}},
  doi          = {{10.1007/978-981-96-5290-7_26}},
  year         = {{2026}},
}

@techreport{63596,
  abstract     = {{This study describes the status quo and the development of AI use in the industrial working world of OstWestfalenLippe. The aim is to create a solid basis for the design of AI-supported work processes and to derive needs-oriented measures.
                  The surveys were conducted in 2021 and 2025 by the competence center Arbeitswelt.Plus and the leading-edge cluster it’s OWL. In 2021, 318 people from 89 companies took part; in 2025, 240 people from 50 companies participated.
                  The results show a clear evolution. Companies are increasingly moving into discussion and adoption phases, and employees’ daily use of AI is rising. With the breakthrough of generative AI, text- and knowledge-related applications are coming to the fore. The solutions’ degree of autonomy is predominantly human-supportive and serves to prepare decisions. Increasing efficiency remains the central goal of introducing AI in companies. In 2025, supporting employees in their everyday work and reducing strain are also gaining importance, underscoring the relevance of human-AI collaboration.
                  A central challenge is the high complexity of AI, which makes its introduction in companies difficult. Further challenges are missing competencies, which must be built up through targeted qualification. Here, a discrepancy exists between self-perception and external perception regarding training offerings. In addition, data security must be ensured, suitable use cases must be selected, and decision-making processes must be accelerated. By contrast, the surveys found that in 2025 employees in the participating companies are generally clear about the goal and added value of introducing AI and understand the strengths and limits of AI.
                  The insights gained feed into the design of AI adoption processes in companies. Companies should be aware of the challenges named above in order to respond appropriately and to strengthen human-AI interaction. A holistic view of AI as a technology, of people, and of the company is important for the introduction of AI.}},
  author       = {{Dondorf, Verena and Lebedeva, Elena and Thommes, Kirsten and Dumitrescu, Roman}},
  publisher    = {{Kompetenzzentrum Arbeitswelt.Plus}},
  title        = {{{Evolution von KI in der industriellen Arbeitswelt}}},
  doi          = {{10.55594/baey2442}},
  year         = {{2025}},
}

@unpublished{63782,
  abstract     = {{Senders of messages prefer to communicate uncertainty verbally (e.g., something is likely to happen) rather than numerically (such as 75\%), leaving receivers with imprecise information. While it is well established that receivers translate verbal probabilities into numerical values that systematically deviate from the intended numerical meaning, it is less clear how this discrepancy influences subsequent behavioral actions. Thus, the role of verbal versus numerical communication of uncertainty warrants additional attention, raising two critical questions: 1) whether differences in decision-making under uncertainty arise between these communication forms, and 2) whether such differences persist even when verbal phrases are translated accurately into the intended numerical meaning. In a laboratory experiment, we show that individuals place significantly lower values on uncertain options with medium to high likelihoods when uncertainty is communicated verbally rather than numerically. This effect may lead to less rational decisions under verbal communication, particularly at high likelihoods. These results remain consistent even if individuals translate verbal uncertainty correctly into the intended numerical uncertainty, implying that a biased behavioral response is not induced by miscommunication alone. Instead, ambiguity about the exact meaning of a verbal phrase interferes with decision-making even beyond potential mistranslations. These findings tie in with previous research on ambiguity aversion, which has predominantly operationalized ambiguity through numerical ranges rather than verbal phrases. Based on our findings, we conclude that managers should communicate uncertainty numerically, as verbal communication can unintentionally influence the decision-making process of employees.}},
  author       = {{Bodenberger, Robin and Thommes, Kirsten}},
  title        = {{{Words or Numbers? How Framing Uncertainties Affects Risk Assessment and Decision-Making}}},
  year         = {{2025}},
}

@unpublished{63783,
  author       = {{Thommes, Kirsten and Mehic, Miro}},
  publisher    = {{Elsevier BV}},
  title        = {{{Foreign Language Use, Attribution Error, and Newcomer Integration}}},
  year         = {{2025}},
}

@article{58939,
  author       = {{Kornowicz, Jaroslaw and Thommes, Kirsten}},
  journal      = {{PLOS ONE}},
  title        = {{{Algorithm, expert, or both? Evaluating the role of feature selection methods on user preferences and reliance}}},
  doi          = {{10.1371/journal.pone.0318874}},
  year         = {{2025}},
}

@article{60280,
  author       = {{Heinovski, Julian and Ergenç, Doğanalp and Thommes, Kirsten and Dressler, Falko}},
  issn         = {{2687-7813}},
  journal      = {{IEEE Open Journal of Intelligent Transportation Systems}},
  pages        = {{1--1}},
  publisher    = {{Institute of Electrical and Electronics Engineers (IEEE)}},
  title        = {{{Incentive-Based Platoon Formation: Optimizing the Personal Benefit for Drivers}}},
  doi          = {{10.1109/ojits.2025.3580464}},
  year         = {{2025}},
}

@article{61137,
  abstract     = {{Prior research shows that social norms can reduce algorithm aversion, but little is known about how such norms become established. Most accounts emphasize technological and individual determinants, yet AI adoption unfolds within organizational social contexts shaped by peers and supervisors. We ask whether the source of the norm, peers or supervisors, shapes AI usage behavior. This question is practically relevant for organizations seeking to promote effective AI adoption. We conducted an online vignette experiment, complemented by qualitative data on participants' feelings and justifications after (counter-)normative behavior. In line with the theory, counter-normative choices elicited higher regret than norm-adherent choices. On average, choosing AI increased regret compared to choosing a human. This aversion was weaker when AI use was presented as the prevailing norm, indicating a statistically significant interaction between AI use and an AI-favoring norm. Participants also attributed less blame to technology than to humans, which increased regret when AI was chosen over human expertise. Both peer and supervisor influence emerged as relevant factors, though contrary to expectations they did not significantly affect regret. Our findings suggest that regret aversion, embedded in social norms, is a central mechanism driving imitation in AI-related decision-making.}},
  author       = {{Kornowicz, Jaroslaw and Pape, Maurice and Thommes, Kirsten}},
  journal      = {{arXiv}},
  title        = {{{Would I regret being different? The influence of social norms on attitudes toward AI usage}}},
  doi          = {{10.48550/ARXIV.2509.04241}},
  year         = {{2025}},
}

@inbook{61820,
  abstract     = {{A scoring list is a sequence of simple decision models, where features are incrementally evaluated and the scores of satisfied features are summed to be used for threshold-based decisions or for calculating class probabilities. In this paper, we introduce a new multi-class variant and compare it against previously introduced binary classification variants for incremental decisions, as well as multi-class variants for classical decision-making using all features. Furthermore, we introduce a new multi-class dataset to assess collaborative human-machine decision-making, which is suitable for user studies with non-expert participants. We demonstrate the usefulness of our approach by evaluating predictive performance and comparing it to the performance of participants without AI help.}},
  author       = {{Heid, Stefan and Kornowicz, Jaroslaw and Hanselle, Jonas and Thommes, Kirsten and Hüllermeier, Eyke}},
  booktitle    = {{Communications in Computer and Information Science}},
  isbn         = {{9783032083265}},
  issn         = {{1865-0929}},
  publisher    = {{Springer Nature Switzerland}},
  title        = {{{MSL: Multi-class Scoring Lists for Interpretable Incremental Decision-Making}}},
  doi          = {{10.1007/978-3-032-08327-2_6}},
  year         = {{2025}},
}
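% Aside for readers of this file: the scoring-list idea summarized in the
% abstract above is compact enough to sketch in code. The following is a
% minimal plain-Python illustration using invented feature predicates and
% scores; it is not the authors' MSL implementation from the chapter.
%
%   scoring_list = [
%       (lambda x: x["age"] > 50, 2),   # (feature predicate, score)
%       (lambda x: x["bmi"] > 30, 1),
%       (lambda x: x["smoker"],   3),
%   ]
%
%   def incremental_scores(x):
%       # Yield the running score after each feature is evaluated;
%       # a threshold-based decision is possible at any prefix of the list.
%       total = 0
%       for predicate, score in scoring_list:
%           if predicate(x):
%               total += score
%           yield total
%
%   def decide(x, threshold=3):
%       *_, final = incremental_scores(x)  # consume the full list
%       return final >= threshold
%
%   patient = {"age": 62, "bmi": 27.5, "smoker": True}
%   print(list(incremental_scores(patient)))  # [2, 2, 5]
%   print(decide(patient))                    # True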

@article{61819,
  author       = {{Papenkordt, Jörg and Ngonga Ngomo, Axel-Cyrille and Thommes, Kirsten}},
  issn         = {{0144-929X}},
  journal      = {{Behaviour \& Information Technology}},
  pages        = {{1--22}},
  publisher    = {{Informa UK Limited}},
  title        = {{{Are numerical or verbal explanations of AI the key to appropriate user reliance and error detection?}}},
  doi          = {{10.1080/0144929x.2025.2568928}},
  year         = {{2025}},
}

@article{52202,
  author       = {{Lammert, Olesja and Richter, Birte and Schütze, Christian and Thommes, Kirsten and Wrede, Britta}},
  journal      = {{Frontiers in Behavioral Economics}},
  title        = {{{Humans in XAI: Increased Reliance in Decision-Making Under Uncertainty by Using Explanation Strategies}}},
  doi          = {{10.3389/frbhe.2024.1377075}},
  year         = {{2024}},
}

@article{53611,
  author       = {{Hoffmann, Christin and Thommes, Kirsten}},
  issn         = {{0095-0696}},
  journal      = {{Journal of Environmental Economics and Management}},
  keywords     = {{Management, Monitoring, Policy and Law, Economics and Econometrics}},
  publisher    = {{Elsevier BV}},
  title        = {{{Can leaders motivate employees’ energy-efficient behavior with thoughtful communication?}}},
  doi          = {{10.1016/j.jeem.2024.102990}},
  year         = {{2024}},
}

@book{54972,
  editor       = {{Thommes, Kirsten and Iseke, Anja and Schneider, Martin}},
  isbn         = {{9783662688373}},
  issn         = {{2523-3637}},
  publisher    = {{Springer Berlin Heidelberg}},
  title        = {{{Digitales und prädiktives Kompetenzmanagement}}},
  doi          = {{10.1007/978-3-662-68838-0}},
  year         = {{2024}},
}

@inproceedings{55177,
  author       = {{Thommes, Kirsten and Lammert, Olesja and Schütze, Christian and Richter, Birte and Wrede, Britta}},
  booktitle    = {{Communications in Computer and Information Science}},
  isbn         = {{9783031638022}},
  issn         = {{1865-0929}},
  publisher    = {{Springer Nature Switzerland}},
  title        = {{{Human Emotions in AI Explanations}}},
  doi          = {{10.1007/978-3-031-63803-9_15}},
  year         = {{2024}},
}
