@article{59226,
  author       = {Herzig, Bardo},
  issn         = {0937-7239},
  journal      = {SchulVerwaltung NRW},
  number       = {1/25},
  pages        = {18--20},
  publisher    = {Carl Link},
  title        = {{Künstliche Intelligenz und professionsbezogene Aufgaben von Lehrkräften}},
  year         = {2025},
}

@article{60949,
  author       = {Giese, Henning and Holtmann, Svea and Koch, Reinald and Langenmayr, Dominika},
  journal      = {ifo Schnelldienst},
  number       = {8},
  pages        = {34--40},
  title        = {{Steuerliches Investitionssofortprogramm: Ausreichender Schritt zur Stärkung des Wirtschaftsstandorts Deutschland?}},
  volume       = {78},
  year         = {2025},
}

@inproceedings{61190,
  author       = {Sengupta, Meghdut and Muschalik, Maximilian and Fumagalli, Fabian and Hammer, Barbara and Hüllermeier, Eyke and Ghosh, Debanjan and Wachsmuth, Henning},
  booktitle    = {Findings of the Association for Computing Linguistics: EMNLP 2025},
  title        = {{Investigating the Impact of Conceptual Metaphors on LLM-based NLI through Shapley Interactions}},
  year         = {2025},
  internal-note = {review: source had booktitle={Accepted in Findings} and publisher={EMNLP} (placeholders with trailing spaces); normalized to the standard Findings-of-EMNLP venue form -- verify exact proceedings title and add pages/DOI once published},
}

@article{61123,
  abstract     = {Knowledge graphs are used by a growing number of applications to represent structured data. Hence, evaluating the veracity of assertions in knowledge graphs—dubbed fact checking—is currently a challenge of growing importance. However, manual fact checking is commonly impractical due to the sheer size of knowledge graphs. This paper is a systematic survey of recent works on automatic fact checking with a focus on knowledge graphs. We present recent fact-checking approaches, the varied sources they use as background knowledge, and the features they rely upon. Finally, we draw conclusions pertaining to possible future research directions in fact checking knowledge graphs.},
  author       = {Qudus, Umair and Röder, Michael and Saleem, Muhammad and Ngonga Ngomo, Axel-Cyrille},
  issn         = {0360-0300},
  journal      = {ACM Computing Surveys},
  keywords     = {fact checking, knowledge graphs, fact-checkers, check worthiness, evidence retrieval, trust, veracity},
  publisher    = {Association for Computing Machinery (ACM)},
  title        = {{Fact Checking Knowledge Graphs -- A Survey}},
  doi          = {10.1145/3749838},
  volume       = {58},
  year         = {2025},
}

@article{59912,
  abstract     = {We study the expressivity and the complexity of various logics in probabilistic team semantics with the Boolean negation. In particular, we study the extension of probabilistic independence logic with the Boolean negation, and a recently introduced logic first-order theory of random variables with probabilistic independence. We give several results that compare the expressivity of these logics with the most studied logics in probabilistic team semantics setting, as well as relating their expressivity to a numerical variant of second-order logic. In addition, we introduce novel entropy atoms and show that the extension of first-order logic by entropy atoms subsumes probabilistic independence logic. Finally, we obtain some results on the complexity of model checking, validity and satisfiability of our logics.},
  author       = {Hannula, Miika and Hirvonen, Minna and Kontinen, Juha and Mahmood, Yasir and Meier, Arne and Virtema, Jonni},
  issn         = {0955-792X},
  journal      = {Journal of Logic and Computation},
  number       = {3},
  publisher    = {Oxford University Press (OUP)},
  title        = {{Logics with probabilistic team semantics and the Boolean negation}},
  doi          = {10.1093/logcom/exaf021},
  volume       = {35},
  year         = {2025},
}

@inproceedings{59054,
  author       = {Firmansyah, Asep Fajar and Zahera, Hamada Mohamed Abdelsamee and Sherif, Mohamed and Moussallem, Diego and Ngonga Ngomo, Axel-Cyrille},
  booktitle    = {The Semantic Web -- ESWC 2025},
  isbn         = {978-3-031-94575-5},
  keywords     = {firmansyah mousallem ngonga sherif zahera},
  pages        = {133--151},
  publisher    = {Springer Nature Switzerland},
  title        = {{ANTS: Abstractive Entity Summarization in Knowledge Graphs}},
  doi          = {10.1007/978-3-031-94575-5_8},
  year         = {2025},
}

@unpublished{61066,
  abstract     = {Argumentation is a central subarea of Artificial Intelligence (AI) for
modeling and reasoning about arguments. The semantics of abstract argumentation
frameworks (AFs) is given by sets of arguments (extensions) and conditions on
the relationship between them, such as stable or admissible. Today's solvers
implement tasks such as finding extensions, deciding credulous or skeptical
acceptance, counting, or enumerating extensions. While these tasks are well
charted, the area between decision, counting/enumeration and fine-grained
reasoning requires expensive reasoning so far. We introduce a novel concept
(facets) for reasoning between decision and enumeration. Facets are arguments
that belong to some extensions (credulous) but not to all extensions
(skeptical). They are most natural when a user aims to navigate, filter, or
comprehend the significance of specific arguments, according to their needs. We
study the complexity and show that tasks involving facets are much easier than
counting extensions. Finally, we provide an implementation, and conduct
experiments to demonstrate feasibility.},
  author       = {Fichte, Johannes and Fröhlich, Nicolas and Hecher, Markus and Lagerkvist, Victor and Mahmood, Yasir and Meier, Arne and Persson, Jonathan},
  note         = {Preprint, arXiv:2505.10982},
  eprint       = {2505.10982},
  eprinttype   = {arXiv},
  title        = {{Facets in Argumentation: A Formal Approach to Argument Significance}},
  year         = {2025},
}

@article{61198,
  author       = {Rogge, Tim and Herzig, Bardo},
  journal      = {Education Sciences},
  volume       = {15},
  publisher    = {MDPI},
  title        = {{Enhancing Pre-Service Teachers’ Reflective Competence Through Structured Video Annotation}},
  year         = {2025},
  internal-note = {review: source had number={15}; MDPI Education Sciences publishes volume 15 in 2025, so 15 is recorded here as the volume -- verify and add issue/article number},
}

@inbook{61150,
  abstract     = {Since the emergence of the field of eXplainable Artificial Intelligence (XAI), a growing number of researchers have argued that XAI should consider insights from the social sciences in order to adapt explanations to the expectations and needs of human users. This has led to the emergence of a field called Social XAI, which is concerned with understanding how explanations are actively shaped in the interaction between a human user and an AI system. Recognizing this turn in XAI toward making XAI systems more “social” by providing explanations that focus on human information needs and incorporating insights from human–human explanatory interactions, in this paper we provide a formal foundation for Social XAI. We do so by proposing novel ontological accounts of the key terms used in Social XAI based on Basic Formal Ontology (BFO). Specifically, we provide novel ontological accounts for explanandum, explanans, understanding, explanation, explainer, explainee, and context. In doing so, we discuss multifaceted entities in Social XAI (having both continuant and occurrent facets; e.g., explanation) and the relationship between understanding and explanation. Additionally, we propose solutions to seemingly paradoxical views on some terms (e.g., social constructivist vs. individual constructivist perspective on explanandum).},
  author       = {Booshehri, Meisam and Buschmeier, Hendrik and Cimiano, Philipp},
  booktitle    = {Proceedings of the 15th International Conference on Formal Ontology in Information Systems},
  isbn         = {9781643686172},
  issn         = {0922-6389},
  location     = {Catania, Italy},
  pages        = {255--268},
  publisher    = {IOS Press},
  title        = {{A BFO-based ontological analysis of entities in Social XAI}},
  doi          = {10.3233/faia250498},
  year         = {2025},
}

@inproceedings{61202,
  abstract     = {The number of datasets on the web of data increases continuously. However, the knowledge contained therein cannot be fully utilized without finding links between the entities contained in these datasets. Equivalent entities can not be identified solely by checking the equivalence of IRIs because of the different origins and naming schemes of different data providers. Yet, such equivalences can be discovered by computing the similarity of their attributes. In this paper we propose GLIDE, an approach that links entities from two different datasets by embedding a joint model of these datasets enriched by additional relations describing the similarity of literals. The joint model is embedded into a latent vector space while paying attention to juxtaposing similar literals. We evaluate our approach against state-of-the-art algorithms using real-world datasets commonly used in link discovery literature. The results show that GLIDE outperforms all baselines on 5 of 7 datasets with perfect or near-perfect accuracy. Our approach achieves its best performance on datasets that feature several literals with similarities. Our experiments indicate that researchers should not only pay attention to equal literals in knowledge graph embedding but should also be aware of the distance between similar literals.},
  author       = {Becker, Alexander and Ngonga Ngomo, Axel-Cyrille and Sherif, Mohamed},
  booktitle    = {The Semantic Web -- ISWC 2025},
  keywords     = {becker sherif enexa sailproject dice simba ngonga whale},
  title        = {{GLIDE: Knowledge Graph Linking using Distance-Aware Embeddings}},
  year         = {2025},
}

@unpublished{61213,
  abstract     = {Understanding how scaffolding strategies influence human understanding in
human-robot interaction is important for developing effective assistive
systems. This empirical study investigates linguistic scaffolding strategies
based on negation as an important means that de-biases the user from potential
errors but increases processing costs and hesitations as a means to ameliorate
processing costs. In an adaptive strategy, the user state with respect to the
current state of understanding and processing capacity was estimated via a
scoring scheme based on task performance, prior scaffolding strategy, and
current eye gaze behavior. In the study, the adaptive strategy of providing
negations and hesitations was compared with a non-adaptive strategy of
providing only affirmations. The adaptive scaffolding strategy was generated
using the computational model SHIFT. Our findings indicate that using adaptive
scaffolding strategies with SHIFT tends to (1) increased processing costs, as
reflected in longer reaction times, but (2) improved task understanding,
evidenced by a lower error rate of almost 23%. We assessed the efficiency of
SHIFT's selected scaffolding strategies across different cognitive states,
finding that in three out of five states, the error rate was lower compared to
the baseline condition. We discuss how these results align with the assumptions
of the SHIFT model and highlight areas for refinement. Moreover, we demonstrate
how scaffolding strategies, such as negation and hesitation, contribute to more
effective human-robot explanatory dialogues.},
  author       = {Groß, André and Richter, Birte and Thomzik, Bjarne and Wrede, Britta},
  note         = {Preprint, arXiv:2503.19692},
  eprint       = {2503.19692},
  eprinttype   = {arXiv},
  title        = {{Leveraging Cognitive States for Adaptive Scaffolding of Understanding in Explanatory Tasks in HRI}},
  year         = {2025},
}

@unpublished{61214,
  abstract     = {In this work, we present a domain-independent approach for adaptive
scaffolding in robotic explanation generation to guide tasks in human-robot
interaction. We present a method for incorporating interdisciplinary research
results into a computational model as a pre-configured scoring system
implemented in a framework called SHIFT. This involves outlining a procedure
for integrating concepts from disciplines outside traditional computer science
into a robotics computational framework. Our approach allows us to model the
human cognitive state into six observable states within the human partner
model. To study the pre-configuration of the system, we implement a
reinforcement learning approach on top of our model. This approach allows
adaptation to individuals who deviate from the configuration of the scoring
system. Therefore, in our proof-of-concept evaluation, the model's adaptability
on four different user types shows that the models' adaptation performs better,
i.e., recouped faster after exploration and has a higher accumulated reward
with our pre-configured scoring system than without it. We discuss further
strategies of speeding up the learning phase to enable a realistic adaptation
behavior to real users. The system is accessible through docker and supports
querying via ROS.},
  author       = {Groß, André and Richter, Birte and Wrede, Britta},
  note         = {Preprint, arXiv:2503.16447},
  eprint       = {2503.16447},
  eprinttype   = {arXiv},
  title        = {{SHIFT: An Interdisciplinary Framework for Scaffolding Human Attention and Understanding in Explanatory Tasks}},
  year         = {2025},
}

@article{61134,
  author       = {Manzoor, Ali and Speck, René and Zahera, Hamada Mohamed Abdelsamee and Saleem, Muhammad and Moussallem, Diego and Ngonga Ngomo, Axel-Cyrille},
  issn         = {2169-3536},
  journal      = {IEEE Access},
  publisher    = {Institute of Electrical and Electronics Engineers (IEEE)},
  title        = {{Multilingual Relation Extraction -- A Survey}},
  doi          = {10.1109/access.2025.3604258},
  year         = {2025},
  internal-note = {review: dropped pages={1--1}, an IEEE early-access placeholder; add the final volume/pages when assigned},
}

@inbook{61222,
  author       = {Lenke, Michael and Klowait, Nils and Biere, Lea and Schulte, Carsten},
  booktitle    = {Lecture Notes in Computer Science},
  isbn         = {9783032012210},
  issn         = {0302-9743},
  publisher    = {Springer Nature Switzerland},
  title        = {{Assessing AI Literacy: A Systematic Review of Questionnaires with Emphasis on Affective, Behavioral, Cognitive, and Ethical Aspects}},
  doi          = {10.1007/978-3-032-01222-7_8},
  year         = {2025},
}

@article{61223,
  abstract     = {Contemporary debates about artificial intelligence (AI) still treat automation as a straightforward substitution of human labor by machines. Drawing on Goffman’s dramaturgical sociology, this paper reframes AI in the workplace as \emph{supplementary} rather than \emph{substitutive} automation. We argue that the central—but routinely overlooked—terrain of struggle is symbolic-interactional: workers continuously stage, conceal, and re-negotiate what counts as “real” work and professional competence. Large language models (LLMs) such as ChatGPT exemplify this dynamic. They quietly take over the invisible, routinised tasks that underpin cognitive occupations (editing, summarizing, first-draft production) while leaving humans to enact the highly visible or relational facets that sustain occupational prestige. Drawing on diverse sources to illustrate our theoretical argument, we show how individual workers, dramaturgical teams, and entire professional fields manage impressions of expertise in order to counter status threats, renegotiate fees, or obscure the extent of AI assistance. The paper itself, having been intentionally written with the ‘aid’ of all presently available frontier AI models, serves as a meta-reflexive performance of professional self-staging. The dramaturgical framework clarifies why utopian tales of friction-free augmentation and dystopian narratives of total displacement both misread how automation is actually unfolding.
By foregrounding visibility, obfuscation, and impression management, the article presents a differentiated case for AI’s impact on the performative structure of work, outlines diagnostic tools for assessing real-world AI exposure beyond hype-driven headlines, and argues for a more human-centered basis for evaluating policy responses to the ‘fourth industrial revolution.’ In short, AI enters the labor process not as an autonomous actor, but as a prop within an ongoing social performance—one whose scripts, stages, and audiences remain irreducibly human.},
  author       = {Klowait, Nils and Erofeeva, Maria},
  issn         = {2297-7775},
  journal      = {Frontiers in Sociology},
  publisher    = {Frontiers Media SA},
  title        = {{The presentation of self in the age of ChatGPT}},
  doi          = {10.3389/fsoc.2025.1614473},
  volume       = {10},
  year         = {2025},
}

@inbook{61226,
  author       = {Erofeeva, Maria and Klowait, Nils and Belov, Mikael and Soulié, Yoann},
  booktitle    = {Communications in Computer and Information Science},
  isbn         = {9783031980794},
  issn         = {1865-0929},
  publisher    = {Springer Nature Switzerland},
  title        = {{Leveraging VR Tools for Inclusive Education: Implications from Sign Language Learning in VRChat}},
  doi          = {10.1007/978-3-031-98080-0_10},
  year         = {2025},
}

@inproceedings{61229,
  author       = {Muschalik, Maximilian and Fumagalli, Fabian and Frazzetto, Paolo and Strotherm, Janine and Hermes, Luca and Sperduti, Alessandro and Hüllermeier, Eyke and Hammer, Barbara},
  booktitle    = {The Thirteenth International Conference on Learning Representations (ICLR)},
  title        = {{Exact Computation of Any-Order Shapley Interactions for Graph Neural Networks}},
  year         = {2025},
}

@inproceedings{61232,
  author       = {Visser, Roel and Fumagalli, Fabian and Hüllermeier, Eyke and Hammer, Barbara},
  booktitle    = {Proceedings of the European Symposium on Artificial Neural Networks (ESANN)},
  keywords     = {FF},
  title        = {{Explaining Outliers using Isolation Forest and Shapley Interactions}},
  year         = {2025},
}

@inproceedings{61231,
  author       = {Fumagalli, Fabian and Muschalik, Maximilian and Hüllermeier, Eyke and Hammer, Barbara and Herbinger, Julia},
  booktitle    = {Proceedings of The 28th International Conference on Artificial Intelligence and Statistics (AISTATS)},
  pages        = {5140--5148},
  publisher    = {PMLR},
  title        = {{Unifying Feature-Based Explanations with Functional ANOVA and Cooperative Game Theory}},
  volume       = {258},
  year         = {2025},
}

@inproceedings{61153,
  author       = {Booshehri, Meisam and Buschmeier, Hendrik and Cimiano, Philipp},
  booktitle    = {Abstracts of the 3rd TRR 318 Conference: Contextualizing Explanations},
  location     = {Bielefeld, Germany},
  title        = {{A BFO-based ontology of context for Social XAI}},
  year         = {2025},
}

