@unpublished{61066,
  abstract     = {Argumentation is a central subarea of Artificial Intelligence (AI) for
modeling and reasoning about arguments. The semantics of abstract argumentation
frameworks (AFs) is given by sets of arguments (extensions) and conditions on
the relationship between them, such as stable or admissible. Today's solvers
implement tasks such as finding extensions, deciding credulous or skeptical
acceptance, counting, or enumerating extensions. While these tasks are well
charted, the area between decision, counting/enumeration and fine-grained
reasoning requires expensive reasoning so far. We introduce a novel concept
(facets) for reasoning between decision and enumeration. Facets are arguments
that belong to some extensions (credulous) but not to all extensions
(skeptical). They are most natural when a user aims to navigate, filter, or
comprehend the significance of specific arguments, according to their needs. We
study the complexity and show that tasks involving facets are much easier than
counting extensions. Finally, we provide an implementation, and conduct
experiments to demonstrate feasibility.},
  author       = {Fichte, Johannes and Fröhlich, Nicolas and Hecher, Markus and Lagerkvist, Victor and Mahmood, Yasir and Meier, Arne and Persson, Jonathan},
  eprint       = {2505.10982},
  eprinttype   = {arXiv},
  note         = {arXiv:2505.10982 [preprint]},
  title        = {Facets in Argumentation: A Formal Approach to Argument Significance},
  year         = {2025},
}

@article{61198,
  author       = {Rogge, Tim and Herzig, Bardo},
  journal      = {Education Sciences},
  number       = {15},
  publisher    = {MDPI},
  title        = {Enhancing Pre-Service Teachers' Reflective Competence Through Structured Video Annotation},
  internal-note = {review: number=15 looks like the volume (Educ. Sci. vol. 15 = 2025); DOI and article number missing -- verify against publisher record},
  year         = {2025},
}

@inproceedings{61150,
  abstract     = {Since the emergence of the field of eXplainable Artificial Intelligence (XAI), a growing number of researchers have argued that XAI should consider insights from the social sciences in order to adapt explanations to the expectations and needs of human users. This has led to the emergence of a field called Social XAI, which is concerned with understanding how explanations are actively shaped in the interaction between a human user and an AI system. Recognizing this turn in XAI toward making XAI systems more “social” by providing explanations that focus on human information needs and incorporating insights from human–human explanatory interactions, in this paper we provide a formal foundation for Social XAI. We do so by proposing novel ontological accounts of the key terms used in Social XAI based on Basic Formal Ontology (BFO). Specifically, we provide novel ontological accounts for explanandum, explanans, understanding, explanation, explainer, explainee, and context. In doing so, we discuss multifaceted entities in Social XAI (having both continuant and occurrent facets; e.g., explanation) and the relationship between understanding and explanation. Additionally, we propose solutions to seemingly paradoxical views on some terms (e.g., social constructivist vs. individual constructivist perspective on explanandum).},
  author       = {Booshehri, Meisam and Buschmeier, Hendrik and Cimiano, Philipp},
  booktitle    = {Proceedings of the 15th International Conference on Formal Ontology in Information Systems},
  isbn         = {9781643686172},
  issn         = {0922-6389},
  location     = {Catania, Italy},
  pages        = {255--268},
  publisher    = {IOS Press},
  title        = {A {BFO}-based ontological analysis of entities in {Social XAI}},
  doi          = {10.3233/faia250498},
  year         = {2025},
}

@inproceedings{61202,
  abstract     = {The number of datasets on the web of data increases continuously. However, the knowledge contained therein cannot be fully utilized without finding links between the entities contained in these datasets. Equivalent entities can not be identified solely by checking the equivalence of IRIs because of the different origins and naming schemes of different data providers. Yet, such equivalences can be discovered by computing the similarity of their attributes. In this paper we propose GLIDE, an approach that links entities from two different datasets by embedding a joint model of these datasets enriched by additional relations describing the similarity of literals. The joint model is embedded into a latent vector space while paying attention to juxtaposing similar literals. We evaluate our approach against state-of-the-art algorithms using real-world datasets commonly used in link discovery literature. The results show that GLIDE outperforms all baselines on 5 of 7 datasets with perfect or near-perfect accuracy. Our approach achieves its best performance on datasets that feature several literals with similarities. Our experiments indicate that researchers should not only pay attention to equal literals in knowledge graph embedding but should also be aware of the distance between similar literals.},
  author       = {Becker, Alexander and Ngonga Ngomo, Axel-Cyrille and Sherif, Mohamed},
  booktitle    = {The Semantic Web -- {ISWC} 2025},
  keywords     = {becker sherif enexa sailproject dice simba ngonga whale},
  title        = {{GLIDE}: Knowledge Graph Linking using Distance-Aware Embeddings},
  year         = {2025},
}

@unpublished{61213,
  abstract     = {Understanding how scaffolding strategies influence human understanding in
human-robot interaction is important for developing effective assistive
systems. This empirical study investigates linguistic scaffolding strategies
based on negation as an important means that de-biases the user from potential
errors but increases processing costs and hesitations as a means to ameliorate
processing costs. In an adaptive strategy, the user state with respect to the
current state of understanding and processing capacity was estimated via a
scoring scheme based on task performance, prior scaffolding strategy, and
current eye gaze behavior. In the study, the adaptive strategy of providing
negations and hesitations was compared with a non-adaptive strategy of
providing only affirmations. The adaptive scaffolding strategy was generated
using the computational model SHIFT. Our findings indicate that using adaptive
scaffolding strategies with SHIFT tends to (1) increased processing costs, as
reflected in longer reaction times, but (2) improved task understanding,
evidenced by a lower error rate of almost 23%. We assessed the efficiency of
SHIFT's selected scaffolding strategies across different cognitive states,
finding that in three out of five states, the error rate was lower compared to
the baseline condition. We discuss how these results align with the assumptions
of the SHIFT model and highlight areas for refinement. Moreover, we demonstrate
how scaffolding strategies, such as negation and hesitation, contribute to more
effective human-robot explanatory dialogues.},
  author       = {Groß, André and Richter, Birte and Thomzik, Bjarne and Wrede, Britta},
  eprint       = {2503.19692},
  eprinttype   = {arXiv},
  note         = {arXiv:2503.19692 [preprint]},
  title        = {Leveraging Cognitive States for Adaptive Scaffolding of Understanding in Explanatory Tasks in {HRI}},
  year         = {2025},
}

@unpublished{61214,
  abstract     = {In this work, we present a domain-independent approach for adaptive
scaffolding in robotic explanation generation to guide tasks in human-robot
interaction. We present a method for incorporating interdisciplinary research
results into a computational model as a pre-configured scoring system
implemented in a framework called SHIFT. This involves outlining a procedure
for integrating concepts from disciplines outside traditional computer science
into a robotics computational framework. Our approach allows us to model the
human cognitive state into six observable states within the human partner
model. To study the pre-configuration of the system, we implement a
reinforcement learning approach on top of our model. This approach allows
adaptation to individuals who deviate from the configuration of the scoring
system. Therefore, in our proof-of-concept evaluation, the model's adaptability
on four different user types shows that the models' adaptation performs better,
i.e., recouped faster after exploration and has a higher accumulated reward
with our pre-configured scoring system than without it. We discuss further
strategies of speeding up the learning phase to enable a realistic adaptation
behavior to real users. The system is accessible through docker and supports
querying via ROS.},
  author       = {Groß, André and Richter, Birte and Wrede, Britta},
  eprint       = {2503.16447},
  eprinttype   = {arXiv},
  note         = {arXiv:2503.16447 [preprint]},
  title        = {{SHIFT}: An Interdisciplinary Framework for Scaffolding Human Attention and Understanding in Explanatory Tasks},
  year         = {2025},
}

@article{61134,
  author       = {Manzoor, Ali and Speck, René and Zahera, Hamada Mohamed Abdelsamee and Saleem, Muhammad and Moussallem, Diego and Ngonga Ngomo, Axel-Cyrille},
  issn         = {2169-3536},
  journal      = {IEEE Access},
  pages        = {1--1},
  publisher    = {Institute of Electrical and Electronics Engineers (IEEE)},
  title        = {Multilingual Relation Extraction - A Survey},
  internal-note = {review: pages=1--1 looks like an early-access placeholder -- update once final pagination is assigned},
  doi          = {10.1109/access.2025.3604258},
  year         = {2025},
}

@inbook{61222,
  author       = {Lenke, Michael and Klowait, Nils and Biere, Lea and Schulte, Carsten},
  booktitle    = {Lecture Notes in Computer Science},
  isbn         = {9783032012210},
  issn         = {0302-9743},
  publisher    = {Springer Nature Switzerland},
  title        = {Assessing {AI} Literacy: A Systematic Review of Questionnaires with Emphasis on Affective, Behavioral, Cognitive, and Ethical Aspects},
  internal-note = {review: booktitle holds the series name (LNCS), not the volume title -- presumably a proceedings export; verify actual booktitle/series/volume},
  doi          = {10.1007/978-3-032-01222-7_8},
  year         = {2025},
}

@article{61223,
  abstract     = {Contemporary debates about artificial intelligence (AI) still treat automation as a straightforward substitution of human labor by machines. Drawing on Goffman’s dramaturgical sociology, this paper reframes AI in the workplace as \emph{supplementary} rather than \emph{substitutive} automation. We argue that the central—but routinely overlooked—terrain of struggle is symbolic-interactional: workers continuously stage, conceal, and re-negotiate what counts as “real” work and professional competence. Large language models (LLMs) such as ChatGPT exemplify this dynamic. They quietly take over the invisible, routinised tasks that underpin cognitive occupations (editing, summarizing, first-draft production) while leaving humans to enact the highly visible or relational facets that sustain occupational prestige. Drawing on diverse sources to illustrate our theoretical argument, we show how individual workers, dramaturgical teams, and entire professional fields manage impressions of expertise in order to counter status threats, renegotiate fees, or obscure the extent of AI assistance. The paper itself, having been intentionally written with the ‘aid’ of all presently available frontier AI models, serves as a meta-reflexive performance of professional self-staging. The dramaturgical framework clarifies why utopian tales of friction-free augmentation and dystopian narratives of total displacement both misread how automation is actually unfolding.
By foregrounding visibility, obfuscation, and impression management, the article presents a differentiated case for AI’s impact on the performative structure of work, outlines diagnostic tools for assessing real-world AI exposure beyond hype-driven headlines, and argues for a more human-centered basis for evaluating policy responses to the ‘fourth industrial revolution.’ In short, AI enters the labor process not as an autonomous actor, but as a prop within an ongoing social performance—one whose scripts, stages, and audiences remain irreducibly human.},
  author       = {Klowait, Nils and Erofeeva, Maria},
  issn         = {2297-7775},
  journal      = {Frontiers in Sociology},
  publisher    = {Frontiers Media SA},
  title        = {The presentation of self in the age of {ChatGPT}},
  doi          = {10.3389/fsoc.2025.1614473},
  volume       = {10},
  year         = {2025},
}

@inbook{61226,
  author       = {Erofeeva, Maria and Klowait, Nils and Belov, Mikael and Soulié, Yoann},
  booktitle    = {Communications in Computer and Information Science},
  isbn         = {9783031980794},
  issn         = {1865-0929},
  publisher    = {Springer Nature Switzerland},
  title        = {Leveraging {VR} Tools for Inclusive Education: Implications from Sign Language Learning in {VRChat}},
  internal-note = {review: booktitle holds the series name (CCIS), not the volume title -- presumably a proceedings export; verify actual booktitle/series/volume},
  doi          = {10.1007/978-3-031-98080-0_10},
  year         = {2025},
}

@inproceedings{61229,
  author       = {Muschalik, Maximilian and Fumagalli, Fabian and Frazzetto, Paolo and Strotherm, Janine and Hermes, Luca and Sperduti, Alessandro and Hüllermeier, Eyke and Hammer, Barbara},
  booktitle    = {The Thirteenth International Conference on Learning Representations ({ICLR})},
  title        = {Exact Computation of Any-Order {Shapley} Interactions for Graph Neural Networks},
  year         = {2025},
}

@inproceedings{61232,
  author       = {Visser, Roel and Fumagalli, Fabian and Hüllermeier, Eyke and Hammer, Barbara},
  booktitle    = {Proceedings of the European Symposium on Artificial Neural Networks ({ESANN})},
  keywords     = {FF},
  title        = {Explaining Outliers using {Isolation Forest} and {Shapley} Interactions},
  year         = {2025},
}

@inproceedings{61231,
  author       = {Fumagalli, Fabian and Muschalik, Maximilian and Hüllermeier, Eyke and Hammer, Barbara and Herbinger, Julia},
  booktitle    = {Proceedings of The 28th International Conference on Artificial Intelligence and Statistics ({AISTATS})},
  pages        = {5140--5148},
  publisher    = {PMLR},
  title        = {Unifying Feature-Based Explanations with Functional {ANOVA} and Cooperative Game Theory},
  volume       = {258},
  year         = {2025},
}

@inproceedings{61153,
  author       = {Booshehri, Meisam and Buschmeier, Hendrik and Cimiano, Philipp},
  booktitle    = {Abstracts of the 3rd {TRR} 318 Conference: Contextualizing Explanations},
  location     = {Bielefeld, Germany},
  title        = {A {BFO}-based ontology of context for {Social XAI}},
  year         = {2025},
}

@inproceedings{61234,
  abstract     = {The ability to generate explanations that are understood by explainees is the
quintessence of explainable artificial intelligence. Since understanding
depends on the explainee's background and needs, recent research focused on
co-constructive explanation dialogues, where an explainer continuously monitors
the explainee's understanding and adapts their explanations dynamically. We
investigate the ability of large language models (LLMs) to engage as explainers
in co-constructive explanation dialogues. In particular, we present a user
study in which explainees interact with an LLM in two settings, one of which
involves the LLM being instructed to explain a topic co-constructively. We
evaluate the explainees' understanding before and after the dialogue, as well
as their perception of the LLMs' co-constructive behavior. Our results suggest
that LLMs show some co-constructive behaviors, such as asking verification
questions, that foster the explainees' engagement and can improve understanding
of a topic. However, their ability to effectively monitor the current
understanding and scaffold the explanations accordingly remains limited.},
  author       = {Fichtel, Leandra and Spliethöver, Maximilian and Hüllermeier, Eyke and Jimenez, Patricia and Klowait, Nils and Kopp, Stefan and Ngonga Ngomo, Axel-Cyrille and Robrecht, Amelie and Scharlau, Ingrid and Terfloth, Lutz and Vollmer, Anna-Lisa and Wachsmuth, Henning},
  booktitle    = {Proceedings of the 26th Annual Meeting of the Special Interest Group on Discourse and Dialogue},
  publisher    = {Association for Computational Linguistics},
  title        = {Investigating Co-Constructive Behavior of Large Language Models in Explanation Dialogues},
  year         = {2025},
}

@inproceedings{59856,
  abstract     = {Recent advances on instruction fine-tuning have led to the development of various prompting techniques for large language models, such as explicit reasoning steps. However, the success of techniques depends on various parameters, such as the task, language model, and context provided. Finding an effective prompt is, therefore, often a trial-and-error process. Most existing approaches to automatic prompting aim to optimize individual techniques instead of compositions of techniques and their dependence on the input. To fill this gap, we propose an adaptive prompting approach that predicts the optimal prompt composition ad-hoc for a given input. We apply our approach to social bias detection, a highly context-dependent task that requires semantic understanding. We evaluate it with three large language models on three datasets, comparing compositions to individual techniques and other baselines. The results underline the importance of finding an effective prompt composition. Our approach robustly ensures high detection performance, and is best in several settings. Moreover, first experiments on other tasks support its generalizability.},
  author       = {Spliethöver, Maximilian and Knebler, Tim and Fumagalli, Fabian and Muschalik, Maximilian and Hammer, Barbara and Hüllermeier, Eyke and Wachsmuth, Henning},
  booktitle    = {Proceedings of the 2025 Conference of the Nations of the Americas Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers)},
  editor       = {Chiruzzo, Luis and Ritter, Alan and Wang, Lu},
  isbn         = {979-8-89176-189-6},
  pages        = {2421--2449},
  publisher    = {Association for Computational Linguistics},
  title        = {Adaptive Prompting: Ad-hoc Prompt Composition for Social Bias Detection},
  year         = {2025},
}

@article{61241,
  abstract     = {Given the presence of fake news and pseudoscience (often disguised as physics), the importance of inoculating citizens against this type of misinformation has gained increasing attention in education. This is a goal that all subjects should pursue equally from their respective disciplinary perspectives. The paper presents a teaching approach to protect physics students from misinformation and pseudoscience by combining these three common strategies: First, understanding the fundamental principles of the Nature of Science. Second, identifying techniques of Science Denial. And third, applying heuristics for evaluating (supposedly) scientific information. Finally, the paper offers practical suggestions for applying these strategies in a master's-level physics course, using examples from the field of physics.},
  author       = {Webersen, Yvonne and Riese, Josef},
  issn         = {0143-0807},
  journal      = {European Journal of Physics},
  publisher    = {IOP Publishing},
  title        = {Protecting physics students from pseudoscience - combining strategies for a comprehensive teaching approach},
  doi          = {10.1088/1361-6404/ae03f6},
  year         = {2025},
}

@article{61245,
  author       = {Barkhausen, Franziska and Ares Santos, Laura and Schumacher, Stefan and Sperling, Jan},
  issn         = {2469-9926},
  journal      = {Physical Review A},
  number       = {3},
  publisher    = {American Physical Society (APS)},
  title        = {Entanglement between dependent degrees of freedom: Quasiparticle correlations},
  doi          = {10.1103/physreva.111.032404},
  volume       = {111},
  year         = {2025},
}

@article{61246,
  abstract     = {The time-dependent one-dimensional nonlinear Schrödinger equation (NLSE) is solved numerically by a hybrid pseudospectral-variational quantum algorithm that connects a pseudospectral step for the Hamiltonian term with a variational step for the nonlinear term. The Hamiltonian term is treated as an integrating factor by forward and backward Fourier transforms, which are here carried out classically. This split allows us to avoid higher-order time integration schemes, to apply a first-order explicit time stepping for the remaining nonlinear NLSE term in a variational algorithm block, and thus to avoid numerical instabilities. We demonstrate that the analytical solution is reproduced with a small root mean square error for a long time interval over which a nonlinear soliton propagates significantly forward in space while keeping its shape. We analyze the accuracy and complexity of the quantum algorithm, the expressibility of the ansatz circuit and compare it with classical approaches. Furthermore, we investigate the influence of algorithm parameters on the accuracy of the results, including the temporal step width and the depth of the quantum circuit.},
  author       = {Köcher, Nikolas and Rose, Hendrik and Bharadwaj, Sachin S. and Schumacher, Jörg and Schumacher, Stefan},
  issn         = {2045-2322},
  journal      = {Scientific Reports},
  number       = {1},
  publisher    = {Springer Science and Business Media LLC},
  title        = {Numerical solution of nonlinear {Schrödinger} equation by a hybrid pseudospectral-variational quantum algorithm},
  doi          = {10.1038/s41598-025-05660-3},
  volume       = {15},
  year         = {2025},
}

@article{61249,
  author       = {Ai, Qiang and Wingenbach, Jan and Yang, Xinmiao and Wei, Jing and Hatzopoulos, Zaharias and Savvidis, Pavlos G. and Schumacher, Stefan and Ma, Xuekai and Gao, Tingge},
  issn         = {2331-7019},
  journal      = {Physical Review Applied},
  number       = {2},
  publisher    = {American Physical Society (APS)},
  title        = {Optically and remotely controlling localization of exciton-polariton condensates in a potential lattice},
  doi          = {10.1103/physrevapplied.23.024029},
  volume       = {23},
  year         = {2025},
}

