@article{64789,
  author       = {Beer, Fabian and Schulz, Christian},
  journal      = {RESET Journal (Recherches sciences sociale sur internet) Special Issue: Towards New Social and Historical Studies of Artificial Intelligence},
  publisher    = {Open Edition Journals},
  title        = {{AI} has never been “inherently interpretable”: On a paradoxical origin of {eXplainable} {AI} ({XAI})},
  year         = {2026},
}

@unpublished{65073,
  abstract     = {We study the large-time behavior of the continuous-time heat kernel and of solutions to the heat equation on homogeneous trees. First, we derive sharp asymptotic formulas for the heat kernel as $t\to\infty$. Second, using them, we show that solutions with initial data in weighted $\ell^1$ classes, asymptotically factorize in $\ell^p$ norms, $p\in[1,\infty]$, as the product of the heat kernel, times a $p$-mass function, dependent on the initial condition and $p$. The $p$-mass function is described in terms of boundary averages associated with Busemann functions for $p<2$, while for $p\ge 2$, it is expressed through convolution with the ground spherical function. For comparison, the case of the integers shows that a single constant mass determines the asymptotics of solutions to the heat equation for all $p$, emphasizing the influence of the graph geometry on heat diffusion.},
  author       = {Papageorgiou, Efthymia},
  eprint       = {2603.11232},
  eprinttype   = {arXiv},
  note         = {arXiv preprint},
  title        = {Long-time asymptotics for the heat kernel and for heat equation solutions on homogeneous trees},
  year         = {2026},
}

@inbook{55598,
  author       = {Schulz, Christian},
  booktitle    = {Handbuch Social Media: Geschichte – Kultur – Ästhetik},
  editor       = {Dörre, Robert and Tuschling, Anna},
  publisher    = {Metzler Verlag},
  title        = {{Feeds. Ein zentrales Strukturprinzip sozialer Medien}},
  year         = {2026},
}

@article{65082,
  abstract     = {Encoding information in molecular arrangements on DNA origami nanostructures (DONs) provides the basis for novel concepts in molecular data storage and computing. To preserve their integrity over long timescales, the information‐carrying DONs are often stored in a frozen state. Here, we investigate the effect of repeated freeze–thaw (F/T) cycles on the structural and functional integrity of DONs carrying biotin (Bt) modifications. Streptavidin (SAv) binding is used to visualize the stored information by atomic force microscopy (AFM) before and after 40 F/T cycles. Two strategies are compared by F/T cycling of (I) SAv‐bound DONs and (II) SAv‐free DONs that are exposed to SAv directly before AFM imaging. Our results reveal that while the DONs retain their overall shape, F/T cycling induces a small amount of damage, leading to slightly reduced SAv binding. Adding glycerol at mM concentrations efficiently protects the DONs and restores the original SAv binding yields. Nevertheless, SAv exposure after F/T cycling leads to slightly higher and more consistent SAv binding yields and a lower background of nonspecifically adsorbed SAv compared to Strategy I. This makes information readout by AFM more efficient and renders Strategy II more convenient for long‐term storage of information‐carrying DONs with repeated information readout.},
  author       = {Li, Xinyang and Rabbe, Lukas and Linneweber, Jacqueline and Grundmeier, Guido and Keller, Adrian Clemens},
  issn         = {2628-9725},
  journal      = {Chemistry–Methods},
  number       = {3},
  publisher    = {Wiley},
  title        = {Stability of Information‐Carrying {DNA} Origami Nanostructures During Repeated Freeze–Thaw Cycles},
  doi          = {10.1002/cmtd.202500161},
  volume       = {6},
  year         = {2026},
}

@article{65081,
  author       = {Schwede, Jana},
  journal      = {berufsbildung},
  number       = {1},
  pages        = {44--46},
  publisher    = {wbv},
  title        = {{Drei Lernorte, (k)ein Zusammenwirken? Lernortkooperation im Spannungsfeld von Anspruch und Wirklichkeit}},
  doi          = {10.3278/BB2601W},
  volume       = {80},
  year         = {2026},
}

@inbook{65078,
  author       = {Schroeter-Wittke, Harald},
  booktitle    = {Auf der Suche nach Frieden. Evangelische Kirchentage in Ost und West seit 1949},
  editor       = {Kuhn, Thomas K. and David, Philipp},
  pages        = {253--271},
  publisher    = {Evangelische Verlagsanstalt},
  title        = {{"Mitten unter euch"?! Frieden und Bibelarbeit auf Kirchentagen}},
  year         = {2026},
}

@article{65077,
  author       = {Schroeter-Wittke, Harald},
  journal      = {Praktische Theologie},
  pages        = {82--84},
  title        = {{Amor mio, perche piangi? Meine Liebe, warum weinst du? Zum 450. Geburtstag von Vittoria/Raffaella Aleotti (1575 - um 1646)}},
  volume       = {61},
  year         = {2026},
}

@inbook{65090,
  abstract     = {If XAI are to become social XAI, XAI methods must have capabilities enabling them to ‘extract’ information about the underlying AI model and to generate explanatory content based on that information. In a dialog between explainer and explainee, the explanans presented in every explanation move have to relate to each other understandably and coherently in order to remain trustworthy. This signifies that the generated explanantia have to be consistent—independently of what question is answered by each explanans, in what modality, in what vocabulary, and at what level of abstraction. Moreover, it is advantageous to be able to provide a rich palette of different kinds of explanantia in order to be able to have a fluent dialog in which the explanantia can be generated and adapted to the context, the explainee, feedback, reactions during the interaction with the explainee, and so forth. This chapter attempts to identify relevant questions that an explainee might ask during an explanatory dialog, and it assesses to what extent different XAI methods are capable of addressing these questions in a coherent way. The Contextual Importance and Utility (CIU) method is used to illustrate how an XAI method can generate explanantia for most of the identified questions. CIU also provides a flexibility in how explanatory content is generated that makes it possible to create a meaningful dialog with the explainee.},
  author       = {Främling, Kary and Thommes, Kirsten and Wrede, Britta},
  booktitle    = {Social Explainable AI},
  editor       = {Rohlfing, Katharina J. and Främling, Kary and Lim, Brian and Alpsancar, Suzana and Thommes, Kirsten},
  isbn         = {9789819652891},
  publisher    = {Springer Nature Singapore},
  title        = {Generation of Explanatory Content and Requirements for Social {XAI}},
  doi          = {10.1007/978-981-96-5290-7_15},
  year         = {2026},
}

@inbook{65088,
  abstract     = {Quantitatively evaluating the benefits of eXplainable Artificial Intelligence (XAI) and social XAI for humans is not a trivial pursuit. Therefore, we categorize the potential measures in terms of subjective and objective outcomes and short- and long-term outcomes of interactive social XAI. When reviewing the current state of the art, we observed some measurement problems in the literature: (a) Researchers do not clearly state whether they want to measure the inner state of users, users’ behavioral response, or the overall AI-human collaborative performance. (b) Moreover, most measures implicitly assume that all humans either do not react or improve in attitudes or performance. Psychological reactance (feeling or doing the opposite) is usually not captured. (c) Many researchers invent their own scale when measuring psychological constructs, thereby jeopardizing the validity of their measures and slowing down progress in the field, because general evidence and subsequent learning can be achieved only by collecting many compatible pieces of evidence. (d) Most studies look into short-term outcomes and neglect that experiences in social interactions with XAI may evolve and have long-term outcomes not only for the individual but also for groups or society at large.},
  author       = {Thommes, Kirsten},
  booktitle    = {Social Explainable AI},
  editor       = {Rohlfing, Katharina J. and Främling, Kary and Lim, Brian and Alpsancar, Suzana and Thommes, Kirsten},
  isbn         = {9789819652891},
  publisher    = {Springer Nature Singapore},
  title        = {Measuring the Outcome of {sXAI}},
  doi          = {10.1007/978-981-96-5290-7_28},
  year         = {2026},
}

@inbook{65086,
  abstract     = {Explainable AI (XAI) aims to make the decisions and behavior of an AI understandable to the people interacting with it and to those affected by its outcomes. To make XAI social, real-world XAI systems need to simulate not only the ways in which human explainers behave within explanatory dialogs but also the ways in which such dialogs can successfully achieve the intended understanding on the explainee’s side. This, in turn, requires an operationalization of the three core aspects of social XAI: multimodality, incrementality, and patterns. This chapter lays the ground for this goal by defining a basic operational model of social interactions that can be refined and extended to account for the specificities of any explanatory real-world setting. This serves as a basis for summarizing and discussing existing ideas from explainability research and related areas in order to operationalize each core aspect. Selected examples and case studies illustrate how to concretely realize such an operationalization, thereby serving as a starting point for future research on social interaction with XAI.},
  author       = {Wachsmuth, Henning and Thommes, Kirsten and Alshomary, Milad},
  booktitle    = {Social Explainable AI},
  editor       = {Rohlfing, Katharina J. and Främling, Kary and Lim, Brian and Alpsancar, Suzana and Thommes, Kirsten},
  isbn         = {9789819652891},
  publisher    = {Springer Nature Singapore},
  title        = {Operationalizing Social Interaction},
  doi          = {10.1007/978-981-96-5290-7_27},
  year         = {2026},
}

@inbook{65091,
  abstract     = {This chapter examines key challenges and potential improvements in the areas of user interaction and dynamic explanations. It highlights the need for XAI systems to address context factors beyond their predefined scope, it points to the potential need to cocreate new concepts that are adapted to particular explainees, and it provides a clear overview of the XAI system’s underlying knowledge structure and interaction steps. Emphasis is placed on mixed-initiative interaction in which the system can lead or respond based on the context and the explainee’s reactions while asserting the importance of maintaining coherence across consecutive explanations. These advances aim to make XAI systems more flexible, interactive, and user-centric. An operationalization section outlines how such social XAI systems could be implemented based on the XAI capabilities provided by the Contextual Importance and Utility XAI method described in the previous chapter.},
  author       = {Främling, Kary and Wrede, Britta and Thommes, Kirsten},
  booktitle    = {Social Explainable AI},
  editor       = {Rohlfing, Katharina J. and Främling, Kary and Lim, Brian and Alpsancar, Suzana and Thommes, Kirsten},
  isbn         = {9789819652891},
  publisher    = {Springer Nature Singapore},
  title        = {Exploration of Explaining Content},
  doi          = {10.1007/978-981-96-5290-7_16},
  year         = {2026},
}

@inbook{65087,
  abstract     = {Much research in XAI focuses on single, one-shot interactions, implicitly assuming that interactions have no past, no future, and no surroundings. Although this assumption may be necessary for many empirical research settings, it is overly simplifying and unrealistic. Whereas empirical research focuses on a world in which no social context exists, real applications are embedded in a temporal (past and future) and social context. Social science research shows that repeated interactions and secondhand knowledge in the social space massively affect human attitudes and behaviors. This chapter explains how not only repeated interactions between XAI and humans but also the social space and secondhand information may affect social XAI research.},
  author       = {Thommes, Kirsten and Främling, Kary and Wrede, Britta and Kubler, Sylvain},
  booktitle    = {Social Explainable AI},
  editor       = {Rohlfing, Katharina J. and Främling, Kary and Lim, Brian and Alpsancar, Suzana and Thommes, Kirsten},
  isbn         = {9789819652891},
  publisher    = {Springer Nature Singapore},
  title        = {Interaction History in Social {XAI}},
  doi          = {10.1007/978-981-96-5290-7_17},
  year         = {2026},
}

@inbook{65089,
  abstract     = {In the past, there has been much research aiming to evaluate XAI practices—that is, explanations that can add to a user’s understanding of “why” or “why not.” However, because there is such a huge amount of diversity in social contexts, optimizing for the mean neglects the social dimensions of to whom, what, why, when, and where explanations are provided. Nonetheless, these dimensions matter. We give some brief examples on the accuracy of the mental model (as an example for who?), on measuring explanation practices (as an example of what?), on human motivation (as an example of why?), on repeated interactions (as an example of when), and on bystander effects (as an example of where?). Importantly, controlling for these factors (or randomizing them) is as important as attempting to perform external validations.},
  author       = {Thommes, Kirsten},
  booktitle    = {Social Explainable AI},
  editor       = {Rohlfing, Katharina J. and Främling, Kary and Lim, Brian and Alpsancar, Suzana and Thommes, Kirsten},
  isbn         = {9789819652891},
  publisher    = {Springer Nature Singapore},
  title        = {Evaluation Principles},
  doi          = {10.1007/978-981-96-5290-7_26},
  year         = {2026},
}

@inproceedings{63469,
  author       = {Knickenberg, Margarita and Löper, Marwin Felix and Grosche, Michael and Grüßing, Meike and Hellmich, Frank},
  note         = {Posterpräsentation auf der 13. Tagung der Gesellschaft für Empirische Bildungsforschung (GEBF). Thema: „Bildungsforschung für technologiebedingte gesellschaftliche Entwicklungen“},
  publisher    = {Technische Universität München},
  title        = {{Förderung sozial-emotionaler Kompetenzen von Kindern für das kooperative Lernen im diversitätssensiblen Mathematikunterricht der Grundschule (soko-M)}},
  year         = {2026},
}

@article{65093,
  author       = {Marten, Thorsten and Ostermann, Moritz and Behm, Jonathan and Leitenmaier, Samuel},
  issn         = {2199-1944},
  journal      = {Berufsbildung - Zeitschrift für Theorie-Praxis-Dialog},
  number       = {1},
  pages        = {23--27},
  publisher    = {wbv Publikation},
  title        = {{NeMo.bil - Individualisierter öffentlicher Personennahverkehr - iÖV}},
  doi          = {10.3278/BB2601},
  volume       = {209},
  year         = {2026},
}

@inproceedings{61542,
  author       = {Hellmich, Frank and Löper, Marwin Felix and Görel, Gamze},
  note         = {Workshop auf dem 10. Paderborner Grundschultag. Thema: „Zukunft gemeinsam gestalten – Bildung für nachhaltige Entwicklung von Anfang an“},
  publisher    = {Universität Paderborn},
  title        = {{Möglichkeiten der Förderung der sozialen Partizipation in der inklusiven Grundschule}},
  year         = {2026},
}

@inproceedings{65101,
  abstract     = {Various methods to measure the dynamic behavior of particles require the calculation of autocorrelation functions. For this purpose, fast multi-tau correlators have been developed in dedicated hardware, in software, and on FPGAs. However, for methods such as X-ray Photon Correlation Spectroscopy (XPCS), which requires to calculate the autocorrelation function independently for hundreds of thousands to millions of pixels from high-resolution detectors, current approaches rely on offline processing after data acquisition. Moreover, the internal pipeline state of so many independent correlators is far too large to keep it on-chip. In this work, we propose a design approach on FPGAs, where pipeline contexts are stored in off-chip HBM memory. Each compute unit iteratively loads the state for a single pixel, processes a short time series for this pixel, and afterwards writes back the context in a dataflow pipeline. We have implemented the required compute kernels with Vitis HLS and analyze resulting designs on an Alveo U280 card. The design achieves the expected performance and for the first time provides sufficient throughput for current high-end detectors used in XPCS.},
  author       = {Tareen, Abdul Rehman and Plessl, Christian and Kenter, Tobias},
  booktitle    = {2025 International Conference on Field Programmable Technology (ICFPT)},
  publisher    = {IEEE},
  title        = {Fast Multi-Tau Correlators on {FPGA} with Context Switching From and to High-Bandwidth Memory},
  doi          = {10.1109/icfpt67023.2025.00027},
  year         = {2026},
}

@inbook{61325,
  author       = {Vollmer, Anna-Lisa and Buhl, Heike M. and Alami, Rachid and Främling, Kary and Grimminger, Angela and Booshehri, Meisam and Ngonga Ngomo, Axel-Cyrille},
  booktitle    = {Social Explainable AI},
  editor       = {Rohlfing, Katharina J. and Främling, Kary and Lim, Brian and Alpsancar, Suzana and Thommes, Kirsten},
  pages        = {39--53},
  publisher    = {Springer},
  title        = {Components of an explanation for co-constructive {sXAI}},
  doi          = {10.1007/978-981-96-5290-7_3},
  year         = {2026},
}

@inbook{65084,
  author       = {Buhl, Heike M. and Vollmer, Anna-Lisa and Alami, Rachid and Booshehri, Meisam and Främling, Kary},
  booktitle    = {Social Explainable AI},
  editor       = {Rohlfing, Katharina J. and Främling, Kary and Lim, Brian and Alpsancar, Suzana and Thommes, Kirsten},
  pages        = {269--295},
  publisher    = {Springer},
  title        = {Models of the situation, the explanandum, and the interaction partner},
  doi          = {10.1007/978-981-96-5290-7_14},
  year         = {2026},
}

@inbook{65083,
  author       = {Buhl, Heike M. and Wrede, Britta and Fisher, Josephine Beryl and Matarese, Marco},
  booktitle    = {Social Explainable AI},
  editor       = {Rohlfing, Katharina J. and Främling, Kary and Lim, Brian and Alpsancar, Suzana and Thommes, Kirsten},
  pages        = {247--267},
  publisher    = {Springer},
  title        = {Adaptation},
  doi          = {10.1007/978-981-96-5290-7_13},
  year         = {2026},
}

