@inproceedings{65030,
  author       = {{Amaral, Luis and Schlichtig, Michael and Emanuel, Wagner and Almeida, Joilton and Ferreira, Carine and Kempf, Jérôme and Bonifácio, Rodrigo and Bodden, Eric and Peotta, Laerte and Pinto, Gustavo and Ribeiro, Márcio}},
  booktitle    = {{2026 IEEE International Conference on Software Analysis, Evolution and Reengineering (SANER)}},
  title        = {{{From Legacy Designs to Vulnerability Fixes: Understanding SAST Adoption in Non-Technological Companies}}},
  year         = {{2026}},
}

@unpublished{65031,
  abstract     = {{We prove that two-step nilpotent $p$-extensions of rational global function fields of characteristic $p$ satisfy a quantitative local-global principle when they are counted according to their largest upper ramification break ("last jump"). We had previously shown this only for $p\neq2$. Compared to our previous proof, this proof is also more self-contained, and may apply to heights other than the last jump. As an application, we describe the distribution of last jumps of $D_4$-extensions of rational global function fields of characteristic $2$. We also exhibit a counterexample to the analogous local-global principle when counting by discriminants.}},
  author       = {{Gundlach, Fabian and Seguin, Beranger Fabrice}},
  eprint       = {{2603.15544}},
  eprinttype   = {{arXiv}},
  note         = {{arXiv preprint}},
  title        = {{{Lifts of unramified twists and local-global principles}}},
  year         = {{2026}},
}

@inbook{60018,
  author       = {{Grauthoff, Fabian and Kreter, Maximilian}},
  booktitle    = {{Speak Up! Einblicke in das ostbelgische Innovationslabor zum Umgang mit Fake News und Hate Speech}},
  editor       = {{Kirschner, Sabrina and Lask, Tomke}},
  pages        = {{247--278}},
  publisher    = {{transcript}},
  title        = {{{"I WON THIS ELECTION, BY A LOT!" - Fake News und die Frage nach Wahrheit. Donald Trump durch die Linsen von Arendt und Rüsen}}},
  year         = {{2026}},
}

@book{65035,
  author       = {{Priesching, Nicole and Hartig, Christine}},
  isbn         = {{9783657796625}},
  publisher    = {{Brill | Schöningh}},
  title        = {{{Sexuelle Gewalt an Minderjährigen im Erzbistum Paderborn. Eine historische Untersuchung (1941–2002)}}},
  doi          = {{10.30965/9783657796625}},
  year         = {{2026}},
}

@phdthesis{61464,
  author       = {{Hannebohm, Ronja}},
  isbn         = {{978-3-7705-7106-2}},
  publisher    = {{Brill | Fink}},
  title        = {{{Nach 1984: Biopoethik des eugenetischen Romans}}},
  year         = {{2026}},
}

@inproceedings{63754,
  abstract     = {{Data spaces are receiving an emerging interest in Information Systems Research and industry practice. They are central to many European research initiatives and shape the data economy in Industry 4.0. Generally, they aim to create secure environments for cross-organizational data management and sharing. Currently, there is considerable interest in developing new data spaces in Industry 4.0, also accelerated through regulatory changes. However, key questions about what precisely characterizes a data space in Industry 4.0 remain unresolved. Against this backdrop, we build a taxonomy of data spaces in the Industry 4.0 context. We identified nine distinctive dimensions and 40 corresponding characteristics among the 19 data spaces analyzed. The taxonomy enables clearer classification and nomenclature of data spaces in this context. This short paper will ignite planned further research on data spaces in Industry 4.0 and contribute to a conceptualization of a taxonomic theory for interested researchers.}},
  author       = {{Werth, Oliver and Koldewey, Christian and Uslar, Mathias and Zerbin, Julian}},
  booktitle    = {{Lecture Notes in Business Information Processing}},
  isbn         = {{9783032145178}},
  issn         = {{1865-1348}},
  keywords     = {{Industry 4.0, Taxonomy, Data spaces, Characterization}},
  location     = {{Stuttgart, Germany}},
  publisher    = {{Springer Nature Switzerland}},
  title        = {{{What Characterizes Data Spaces in Industry 4.0? Towards a Better Understanding}}},
  doi          = {{10.1007/978-3-032-14518-5_3}},
  year         = {{2026}},
}

@article{65037,
  abstract     = {{<jats:title>ABSTRACT</jats:title>
                  <jats:p>Homogenization methods simulate heterogeneous materials like composites effectively, but high computational demands can offset their benefits. This work balances accuracy and efficiency by assessing model and discretization errors of the finite element method (FEM) through an adaptive numerical scheme. Two model hierarchies are introduced, combining mean‐field and full‐field methods, and nonuniform transformation field analysis (NTFA) with full‐field methods. Both hierarchies use a full‐field FEM solution of the representative volume element (RVE) as reference. The study highlights the benefits of using effective constitutive equations from mean‐field and full‐field methods as well as NTFA methods, with a goal‐oriented a posteriori error estimator based on duality techniques controlling mesh and model errors in a forwards‐in‐time manner.</jats:p>}},
  author       = {{Simeu, Arnold Tchomgue and Caylak, Ismail and Ostwald, Richard}},
  issn         = {{0029-5981}},
  journal      = {{International Journal for Numerical Methods in Engineering}},
  number       = {{6}},
  publisher    = {{Wiley}},
  title        = {{{Mesh and Model Adaptivity for Multiscale Elastoplastic Models With Prandtl-Reuss Type Material Laws}}},
  doi          = {{10.1002/nme.70294}},
  volume       = {{127}},
  year         = {{2026}},
}

@unpublished{65036,
  author       = {{Cohen, Tal and Glöckner, Helge and Goffer, Gil and Lederle, Waltraud}},
  note         = {{Unpublished manuscript}},
  title        = {{{Compact invariant random subgroups}}},
  year         = {{2026}},
}

@article{64997,
  author       = {{Trienens, Dorte and Brüning, Florian and Schöppner, Volker}},
  journal      = {{kunststoffland NRW report}},
  keywords     = {{extrusion, KI, Simulation}},
  number       = {{03-2025}},
  pages        = {{24--25}},
  title        = {{{Wo Forschung, KI und Praxis aufeinandertreffen}}},
  year         = {{2026}},
}

@inproceedings{65041,
  author       = {{Landgräber, Jan and Schöppner, Volker}},
  title        = {{{Verbesserung des Einzugsverhaltens im Spritzgießprozess - Numerik und Experiment}}},
  year         = {{2026}},
}

@article{64981,
  author       = {{Münzmay, Andreas}},
  issn         = {{0027-4801}},
  journal      = {{Die Musikforschung}},
  number       = {{1}},
  pages        = {{99--102}},
  publisher    = {{Sächsische Landesbibliothek, Staats- und Universitätsbibliothek Dresden}},
  title        = {{{ISABELLE WIMMER: Goethe auf der französischen Opernbühne. Zur künstlerischen Goethe-Rezeption im 19. Jahrhundert. Baden-Baden: Rombach Wissenschaft 2024. 587 S., Tab. (Reihe Musik und Literatur. Band 4.)}}},
  doi          = {{10.52412/mf.2026.h1.5145}},
  volume       = {{79}},
  year         = {{2026}},
}

@article{65053,
  author       = {{Fuchs, Christian}},
  journal      = {{ORF Public Value Texte}},
  number       = {{30}},
  pages        = {{63--67}},
  title        = {{{Why We Need a Public Service Internet, not the Defunding of Public Service Media}}},
  year         = {{2026}},
}

@book{65056,
  editor       = {{Kalusche, Katinka and Bormuth, Heike and Meyer-Hamme, Johannes}},
  title        = {{{Selbstständig. Geschichtslernen als Befähigung zum historischen Denken. Festschrift für Andreas Körber}}},
  year         = {{2026}},
}

@article{65054,
  author       = {{Jenert, Tobias and Kremer, H.-Hugo and Kückmann, Marie-Ann and Sänger, Niklas and Schmid, Leonie and Wilde, Stephanie}},
  journal      = {{bwp@ Spezial 23}},
  title        = {{{Kontextualisierung von Lehren und Lernen: Didaktische Einbettung und Implikationen eines virtuellen Berufskollegs zur Förderung von Professionalisierungsprozessen in der beruflichen Lehrkräftebildung}}},
  year         = {{2026}},
}

@inbook{64735,
  author       = {{Jenert, Tobias and Kremer, H.-Hugo and Kückmann, Marie-Ann and Sänger, Niklas and Schmid, Leonie and Wilde, Stephanie}},
  booktitle    = {{Handlungsorientierung in der Ausbildung von Lehrkräften und pädagogischen Fachkräften - Konzeptionen und Forschungsperspektiven}},
  editor       = {{Vogelsang, Christoph and Grotegut, Lea and Bruns, Julia and Fechner, Sabine}},
  publisher    = {{Waxmann}},
  title        = {{{Professionelle Entwicklung für das Lehramt an Berufskollegs. Theoretische Analysen besonderer Kompetenzanforderungen und Konsequenzen für die Studienganggestaltung}}},
  doi          = {{10.31244/9783818851057}},
  volume       = {{2}},
  year         = {{2026}},
}

@inbook{65061,
  abstract     = {{<jats:title>Abstract</jats:title>
                  <jats:p>
                    One of the purposes for which XAI is often brought into play is to enable a user to act responsibly. However, responsibility is a complex normative and social phenomenon that we unfold in this chapter. We consider that the classical concepts of agency and responsibility do not fully capture what is needed for meaningful collaboration between human users and XAI. Advocating the perspective of sXAI, we argue that the growing adaptivity of AI systems will result in sXAI being considered as partners. Both partners adopt particular (dialogical) roles within a collaborative process and take responsibility for them. We expect that these roles lead to reactive attitudes toward the sXAI on the side of the human partners that make these roles relational. They resemble those reactive attitudes that we hold toward other human agents. For agents to exercise their responsibility, they need to possess agential capacities to fulfill their role with respect to the structure of a social interaction. Hence, sXAI can be expected to act responsibly. But because of XAI’s limited normative capacities, it might rather act as a marginal agent. We refer to marginal agents and show they can be scaffolded with regard to their agential capacities and their knowledge about the structure of a social interaction. The structure links the actions of the partners to each other in terms of a set of stimuli and responses to it in pursuit of a particular goal. Hence, it is important to differentiate between the different goals that a structure can impose for exercising responsibility. Therefore, we follow (Responsibility from the margins. Oxford University Press; 2015.
                    <jats:ext-link xmlns:xlink="http://www.w3.org/1999/xlink" xlink:href="https://doi.org/10.1093/acprof:oso/9780198715672.24001.0001" ext-link-type="uri">https://doi.org/10.1093/acprof:oso/9780198715672.24001.0001</jats:ext-link>
                    ) and offer three structures that can help to organize responsibility for
                    <jats:italic>decisions made</jats:italic>
                    with the assistance of AI systems. These structures are attributability, answerability, and accountability. Our insights will inform the development and design process of XAI to meet the guiding principles of responsible research and innovation as well as trustworthy AI.
                  </jats:p>}},
  author       = {{Rohlfing, Katharina J. and Alpsancar, Suzana and Schulte, Carsten}},
  booktitle    = {{Social Explainable AI}},
  isbn         = {{9789819652891}},
  pages        = {{157--177}},
  publisher    = {{Springer Nature Singapore}},
  title        = {{{Responsibilities in sXAI}}},
  doi          = {{10.1007/978-981-96-5290-7_9}},
  year         = {{2026}},
}

@inbook{65063,
  abstract     = {{<jats:title>Abstract</jats:title>
                  <jats:p>
                    This chapter critically examines how social explainable AI (sXAI) can better support AI practitioners in ensuring fairness in AI-based decision-making. We argue for a fundamental shift: Fairness should be understood not as a technical property or an information problem, but as a matter of vulnerability—focusing on the real-world impacts of AI on individuals and groups, especially those most at risk. Hereby, we call for a shift in perspective: from fair AI to
                    <jats:italic>tasking AI fairly</jats:italic>
                    . To motivate our vulnerability approach, we review the “Dutch welfare fraud scandal” (system risk indication—SyRI) and current challenges in the field of fair AI/machine learning (ML). Vulnerability of a person or members of a definable group of persons is a complex relational notion, and not a technical property of a technical system. Accordingly, we suggest several nontechnical strategies that hold the promise to compensate for the insufficiency of purely technical approaches to fairness and other ethical issues in the practical use of AI-based systems. To discuss how sXAI, due to its interactive and adaptive social character, might better fulfill this role than current XAI techniques, we provide a toy scenario for how sXAI might support the virtuous AI practitioner in an ethical inquiry. Finally, we also address challenges and limits of our approach.
                  </jats:p>}},
  author       = {{Alpsancar, Suzana and Stamboliev, Eugenia}},
  booktitle    = {{Social Explainable AI}},
  isbn         = {{9789819652891}},
  pages        = {{557--581}},
  publisher    = {{Springer Nature Singapore}},
  title        = {{{Tasking AI Fairly. How to Empower AI Practitioners With sXAI?}}},
  doi          = {{10.1007/978-981-96-5290-7_29}},
  year         = {{2026}},
}

@inbook{65064,
  abstract     = {{<jats:title>Abstract</jats:title>
                  <jats:p>XAI can minimize the risks of being manipulated and deceived by AI but in turn entails other specific risks. This also applies to sXAI, and the specifically social character of sXAI harbors particular risks that designers and developers should be aware of. In this chapter, we shall discuss the potential opportunities and risks of sXAI. We see a particularly positive potential in the social character of sXAI, which lies in the fact that skillful users, including those with “healthy distrust,” can use the adaptivity of sXAI to produce an explanation that is actually relevant and adequate for them. However, this requires a high level of skills on the part of the user and is thus in contrast to the general promise of efficiency in the use of AI. A potential risk of XAI is that it can be (even more) persuasive, as the interactive involvement and the anthropomorphism strengthen a trustworthy appearance/performance (independent of the adequacy of the sXAI performance).</jats:p>}},
  author       = {{Alpsancar, Suzana and Klenk, Michael}},
  booktitle    = {{Social Explainable AI}},
  isbn         = {{9789819652891}},
  pages        = {{583--616}},
  publisher    = {{Springer Nature Singapore}},
  title        = {{{The Risk of Manipulation and Deception in sXAI}}},
  doi          = {{10.1007/978-981-96-5290-7_30}},
  year         = {{2026}},
}

@inbook{62709,
  author       = {{Reijers, Wessel and Alpsancar, Suzana}},
  booktitle    = {{Social explainable AI. Communications of NII Shonan Meetings}},
  editor       = {{Rohlfing, Katharina and Främling, Kary and Lim, Brian and Alpsancar, Suzana and Thommes, Kirsten}},
  pages        = {{179--195}},
  publisher    = {{Springer}},
  title        = {{{Values and Norms in sXAI}}},
  year         = {{2026}},
}

@book{57167,
  editor       = {{Alpsancar, Suzana and Friedrich, Alexander and Gehring, Petra and Kaminski, Andreas and Nordmann, Alfred}},
  isbn         = {{978-3-7560-1830-7}},
  publisher    = {{Nomos}},
  title        = {{{Jahrbuch Technikphilosophie. Täuschung und Illusion. 10. Jahrgang 2025}}},
  year         = {{2026}},
}

