@inproceedings{61177,
  abstract     = {{Human and model-generated texts can be distinguished by examining the magnitude of likelihood in language. However, it is becoming increasingly difficult as language model's capabilities of generating human-like texts keep evolving. This study provides a new perspective by using the relative likelihood values instead of absolute ones, and extracting useful features from the spectrum-view of likelihood for the human-model text detection task. We propose a detection procedure with two classification methods, supervised and heuristic-based, respectively, which results in competitive performances with previous zero-shot detection methods and a new state-of-the-art on short-text detection. Our method can also reveal subtle differences between human and model languages, which find theoretical roots in psycholinguistics studies.}},
  author       = {{Xu, Yang and Wang, Yu and An, Hao and Liu, Zhichen and Li, Yongyuan}},
  booktitle    = {{Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing}},
  location     = {{Miami, FL, USA}},
  pages        = {{10108--10121}},
  publisher    = {{ACL}},
  title        = {{{Detecting subtle differences between human and model languages using spectrum of relative likelihood}}},
  doi          = {{10.18653/v1/2024.emnlp-main.564}},
  year         = {{2024}},
}

@inproceedings{56985,
  author       = {{Buschmeier, Hendrik and Kopp, Stefan and Hassan, Teena}},
  booktitle    = {{Proceedings of the 26th ACM International Conference on Multimodal Interaction}},
  location     = {{San José, Costa Rica}},
  pages        = {{698--699}},
  publisher    = {{ACM}},
  title        = {{{Multimodal Co-Construction of Explanations with XAI Workshop}}},
  doi          = {{10.1145/3678957.3689205}},
  year         = {{2024}},
}

@inproceedings{55916,
  abstract     = {{To produce explanations that are more likely to be accepted by humans, Explainable Artificial Intelligence (XAI) systems need to incorporate explanation models grounded in human communication patterns. So far, little is known about how an explainee, who lacks understanding of an issue, and an explainer, who has knowledge to fill the explainee's knowledge gap, actively shape an explanation process, and how their involvement relates to explanatory success in terms of maximizing the explainee's level of understanding. In this paper, we characterize explanations as dialogues in which explainee and explainer take turns to advance the explanation process. We build on an existing annotation scheme of ‘explanatory moves’ to characterize such turns, and manually annotate 362 dialogical explanations from the “Explain Like I'm Five” subreddit. Building on the annotated data, we compute correlations between explanatory moves and explanatory success, measured on a five-point Likert scale, in order to identify factors that are significantly correlated with explanatory success. Based on a qualitative analysis of these factors, we develop a conceptual model of the main factors that contribute to the success of explanatory dialogues.}},
  author       = {{Booshehri, Meisam and Buschmeier, Hendrik and Cimiano, Philipp}},
  booktitle    = {{Proceedings of the 26th ACM International Conference on Multimodal Interaction}},
  location     = {{San José, Costa Rica}},
  pages        = {{373--381}},
  publisher    = {{ACM}},
  title        = {{{A model of factors contributing to the success of dialogical explanations}}},
  doi          = {{10.1145/3678957.3685744}},
  year         = {{2024}},
}

@inproceedings{55995,
  abstract     = {{Scripted dialogues such as movie and TV subtitles constitute a widespread source of training data for conversational NLP models. However, there are notable linguistic differences between these dialogues and spontaneous interactions, especially regarding the occurrence of communicative feedback such as backchannels, acknowledgments, or clarification requests. This paper presents a quantitative analysis of such feedback phenomena in both subtitles and spontaneous conversations. Based on conversational data spanning eight languages and multiple genres, we extract lexical statistics, classifications from a dialogue act tagger, expert annotations and labels derived from a fine-tuned Large Language Model (LLM). Our main empirical findings are that (1) communicative feedback is markedly less frequent in subtitles than in spontaneous dialogues and (2) subtitles contain a higher proportion of negative feedback. We also show that dialogues generated by standard LLMs lie much closer to scripted dialogues than spontaneous interactions in terms of communicative feedback.}},
  author       = {{Pilán, Ildikó and Prévot, Laurent and Buschmeier, Hendrik and Lison, Pierre}},
  booktitle    = {{Proceedings of the 25th Meeting of the Special Interest Group on Discourse and Dialogue}},
  location     = {{Kyoto, Japan}},
  pages        = {{440--457}},
  title        = {{{Conversational feedback in scripted versus spontaneous dialogues: A comparative analysis}}},
  doi          = {{10.18653/v1/2024.sigdial-1.38}},
  year         = {{2024}},
}

@inproceedings{55913,
  abstract     = {{We examined the turn-taking dynamics across different phases of explanatory dialogues, in which 21 different explainers explained a board game to 2–3 explainees each. Turn-taking dynamics are investigated focusing on >19K floor transitions, i.e., the detailed patterns characterizing turn keeping or turn yielding events (Gilmartin et al., 2020). The explanations were characterized by three different phases (board game absent, board game present, interactive game play), for which we observed differences in turn-taking dynamics: explanations where the board game is absent are characterized by less complex floor transitions, while explanations with a concretely shared reference space are characterized by more complex floor transitions, as well as more floor transitions between interlocutors. Also, the speakers’ dialogue role (explainer vs. explainee) appears to have a strong impact on turn-taking dynamics, as floor transitions that do not conform with the dialogue role tend to involve more effort, or floor management work.}},
  author       = {{Wagner, Petra and Włodarczak, Marcin and Buschmeier, Hendrik and Türk, Olcay and Gilmartin, Emer}},
  booktitle    = {{Proceedings of the 28th Workshop on the Semantics and Pragmatics of Dialogue}},
  issn         = {{2308-2275}},
  location     = {{Trento, Italy}},
  pages        = {{6--14}},
  title        = {{{Turn-taking dynamics across different phases of explanatory dialogues}}},
  year         = {{2024}},
}

@inbook{61210,
  abstract     = {{Knowledge graphs (KGs) differ significantly over multiple different versions of the same data source. They also often contain blank nodes that do not have a constant identifier over all versions. Linking such blank nodes from different versions is a challenging task. Previous works propose different approaches to create signatures for all blank nodes based on named nodes in their neighborhood to match blank nodes with similar signatures. However, these works struggle to find a good mapping when the difference between the KGs’ versions grows too large. In this work, we propose Blink, an embedding-based approach for blank node linking. Blink merges two KGs’ versions and embeds the merged graph into a latent vector space based on translational embeddings and subsequently matches the closest pairs of blank nodes from different graphs. We evaluate our approach using real-world datasets against state-of-the-art approaches by computing the blank node matching for isomorphic graphs and graphs that contain triple changes (i.e., added or removed triples). The results indicate that Blink achieves perfect accuracy for isomorphic graphs. For graph versions that contain changes, such as having up to 20% of triples removed in one version, Blink still produces a mapping with an Optimal Mapping Deviation Ratio of under 1%. These results show that Blink leads to a better linking of KGs over different versions and similar graphs adhering to the linked data guidelines.}},
  author       = {{Becker, Alexander and Sherif, Mohamed and Ngonga Ngomo, Axel-Cyrille}},
  booktitle    = {{Lecture Notes in Computer Science}},
  isbn         = {{9783031778438}},
  issn         = {{0302-9743}},
  location     = {{Baltimore, USA}},
  publisher    = {{Springer Nature Switzerland}},
  title        = {{{Blink: Blank Node Matching Using Embeddings}}},
  doi          = {{10.1007/978-3-031-77844-5_12}},
  year         = {{2024}},
}

@inproceedings{54084,
  author       = {{Karalis, Nikolaos and Bigerl, Alexander and Heidrich, Liss and Sherif, Mohamed and Ngonga Ngomo, Axel-Cyrille}},
  booktitle    = {{ESWC}},
  keywords     = {{bigerl dice enexa heidrich karalis ngonga sail sherif}},
  title        = {{{Efficient Evaluation of Conjunctive Regular Path Queries Using Multi-way Joins}}},
  year         = {{2024}},
}

@inproceedings{61219,
  author       = {{Kumar, Ajay and Naumann, Marius and Henne, Kevin and Sherif, Mohamed}},
  booktitle    = {{Joint Proceedings of Posters, Demos, Workshops, and Tutorials of the 20th International Conference on Semantic Systems co-located with 20th International Conference on Semantic Systems (SEMANTiCS 2024), Amsterdam, The Netherlands, September 17-19, 2024}},
  editor       = {{Garijo, Daniel and Gentile, Anna Lisa and Kurteva, Anelia and Mannocci, Andrea and Osborne, Francesco and Vahdati, Sahar}},
  keywords     = {{kumar sherif enexa climatebowl ingrid simba dice whale}},
  location     = {{Amsterdam, The Netherlands}},
  publisher    = {{CEUR-WS.org}},
  title        = {{{PCFWebUI: Data-driven WebUI for holistic decarbonization based on PCF-Tracking}}},
  volume       = {{3759}},
  year         = {{2024}},
}

@book{61182,
  editor       = {{Herzig, Bardo and Eickelmann, Birgit and Schwabl, Franziska and Schulze, Johanna and Niemann, Jan}},
  isbn         = {{978-3-8309-4837-7}},
  issn         = {{2944-6791}},
  pages        = {{285}},
  publisher    = {{Waxmann}},
  title        = {{{Lehrkräftebildung in der digitalen Welt. Zukunftsorientierte Forschungs- und Praxisperspektiven}}},
  doi          = {{10.31244/9783830998372}},
  volume       = {{1}},
  year         = {{2024}},
}

@book{61186,
  editor       = {{Herzig, Bardo and Eickelmann, Birgit and Schwabl, Franziska and Schulze, Johanna and Niemann, Jan}},
  isbn         = {{978-3-8309-4837-7}},
  issn         = {{2944-6791}},
  pages        = {{285}},
  publisher    = {{Waxmann}},
  title        = {{{Lehrkräftebildung in der digitalen Welt. Zukunftsorientierte Forschungs- und Praxisperspektiven}}},
  volume       = {{1}},
  year         = {{2024}},
}

@inproceedings{55094,
  author       = {{Zahera, Hamada Mohamed Abdelsamee and Manzoor, Ali and Sherif, Mohamed and Moussallem, Diego and Ngonga Ngomo, Axel-Cyrille}},
  booktitle    = {{SEMANTiCS}},
  keywords     = {{TRR318 climatebowl colide dice enexa kiam manzoor moussallem ngonga sailproject sherif simba zahera}},
  title        = {{{Generating SPARQL from Natural Language Using Chain-of-Thoughts Prompting}}},
  year         = {{2024}},
}

@article{56190,
  abstract     = {{This study investigates the potential of using advanced conversational artificial intelligence (AI) to help people understand complex AI systems. In line with conversation-analytic research, we view the participatory role of AI as dynamically unfolding in a situation rather than being predetermined by its architecture. To study user sensemaking of intransparent AI systems, we set up a naturalistic encounter between human participants and two AI systems developed in-house: a reinforcement learning simulation and a GPT-4-based explainer chatbot. Our results reveal that an explainer-AI only truly functions as such when participants actively engage with it as a co-constructive agent. Both the interface’s spatial configuration and the asynchronous temporal nature of the explainer AI – combined with the users’ presuppositions about its role – contribute to the decision whether to treat the AI as a dialogical co-participant in the interaction. Participants establish evidentiality conventions and sensemaking procedures that may diverge from a system’s intended design or function.}},
  author       = {{Klowait, Nils and Erofeeva, Maria and Lenke, Michael and Horwath, Ilona and Buschmeier, Hendrik}},
  journal      = {{Discourse \& Communication}},
  number       = {{6}},
  pages        = {{917--930}},
  publisher    = {{Sage}},
  title        = {{{Can AI explain AI? Interactive co-construction of explanations among human and artificial agents}}},
  doi          = {{10.1177/17504813241267069}},
  volume       = {{18}},
  year         = {{2024}},
}

@inproceedings{58224,
  author       = {{Kenneweg, Philip and Kenneweg, Tristan and Fumagalli, Fabian and Hammer, Barbara}},
  booktitle    = {{2024 International Joint Conference on Neural Networks (IJCNN)}},
  keywords     = {{Training, Schedules, Codes, Search methods, Source coding, Computer architecture, Transformers}},
  pages        = {{1--8}},
  title        = {{{No learning rates needed: Introducing SALSA - Stable Armijo Line Search Adaptation}}},
  doi          = {{10.1109/IJCNN60899.2024.10650124}},
  year         = {{2024}},
}

@inproceedings{53073,
  abstract     = {{While shallow decision trees may be interpretable, larger ensemble models like gradient-boosted trees, which often set the state of the art in machine learning problems involving tabular data, still remain black box models. As a remedy, the Shapley value (SV) is a well-known concept in explainable artificial intelligence (XAI) research for quantifying additive feature attributions of predictions. The model-specific TreeSHAP methodology solves the exponential complexity for retrieving exact SVs from tree-based models. Expanding beyond individual feature attribution, Shapley interactions reveal the impact of intricate feature interactions of any order. In this work, we present TreeSHAP-IQ, an efficient method to compute any-order additive Shapley interactions for predictions of tree-based models. TreeSHAP-IQ is supported by a mathematical framework that exploits polynomial arithmetic to compute the interaction scores in a single recursive traversal of the tree, akin to Linear TreeSHAP. We apply TreeSHAP-IQ on state-of-the-art tree ensembles and explore interactions on well-established benchmark datasets.}},
  author       = {{Muschalik, Maximilian and Fumagalli, Fabian and Hammer, Barbara and Huellermeier, Eyke}},
  booktitle    = {{Proceedings of the AAAI Conference on Artificial Intelligence (AAAI)}},
  issn         = {{2374-3468}},
  keywords     = {{Explainable Artificial Intelligence}},
  number       = {{13}},
  pages        = {{14388--14396}},
  title        = {{{Beyond TreeSHAP: Efficient Computation of Any-Order Shapley Interactions for Tree Ensembles}}},
  doi          = {{10.1609/aaai.v38i13.29352}},
  volume       = {{38}},
  year         = {{2024}},
}

@inproceedings{55311,
  abstract     = {{Addressing the limitations of individual attribution scores via the Shapley value (SV), the field of explainable AI (XAI) has recently explored intricate interactions of features or data points. In particular, extensions of the SV, such as the Shapley Interaction Index (SII), have been proposed as a measure to still benefit from the axiomatic basis of the SV. However, similar to the SV, their exact computation remains computationally prohibitive. Hence, we propose with SVARM-IQ a sampling-based approach to efficiently approximate Shapley-based interaction indices of any order. SVARM-IQ can be applied to a broad class of interaction indices, including the SII, by leveraging a novel stratified representation. We provide non-asymptotic theoretical guarantees on its approximation quality and empirically demonstrate that SVARM-IQ achieves state-of-the-art estimation results in practical XAI scenarios on different model classes and application domains.}},
  author       = {{Kolpaczki, Patrick and Muschalik, Maximilian and Fumagalli, Fabian and Hammer, Barbara and Huellermeier, Eyke}},
  booktitle    = {{Proceedings of The 27th International Conference on Artificial Intelligence and Statistics (AISTATS)}},
  pages        = {{3520--3528}},
  publisher    = {{PMLR}},
  title        = {{{SVARM-IQ: Efficient Approximation of Any-order Shapley Interactions through Stratification}}},
  volume       = {{238}},
  year         = {{2024}},
}

@inproceedings{58223,
  abstract     = {{The Shapley value (SV) is a prevalent approach of allocating credit to machine learning (ML) entities to understand black box ML models. Enriching such interpretations with higher-order interactions is inevitable for complex systems, where the Shapley Interaction Index (SII) is a direct axiomatic extension of the SV. While it is well-known that the SV yields an optimal approximation of any game via a weighted least square (WLS) objective, an extension of this result to SII has been a long-standing open problem, which even led to the proposal of an alternative index. In this work, we characterize higher-order SII as a solution to a WLS problem, which constructs an optimal approximation via SII and k-Shapley values (k-SII). We prove this representation for the SV and pairwise SII and give empirically validated conjectures for higher orders. As a result, we propose KernelSHAP-IQ, a direct extension of KernelSHAP for SII, and demonstrate state-of-the-art performance for feature interactions.}},
  author       = {{Fumagalli, Fabian and Muschalik, Maximilian and Kolpaczki, Patrick and Hüllermeier, Eyke and Hammer, Barbara}},
  booktitle    = {{Proceedings of the 41st International Conference on Machine Learning (ICML)}},
  pages        = {{14308--14342}},
  publisher    = {{PMLR}},
  title        = {{{KernelSHAP-IQ: Weighted Least Square Optimization for Shapley Interactions}}},
  volume       = {{235}},
  year         = {{2024}},
}

@inproceedings{61228,
  author       = {{Muschalik, Maximilian and Baniecki, Hubert and Fumagalli, Fabian and Kolpaczki, Patrick and Hammer, Barbara and Huellermeier, Eyke}},
  booktitle    = {{Advances in Neural Information Processing Systems (NeurIPS)}},
  pages        = {{130324--130357}},
  title        = {{{shapiq: Shapley interactions for machine learning}}},
  volume       = {{37}},
  year         = {{2024}},
}

@inproceedings{61230,
  author       = {{Kolpaczki, Patrick and Bengs, Viktor and Muschalik, Maximilian and Hüllermeier, Eyke}},
  booktitle    = {{Proceedings of the AAAI conference on Artificial Intelligence (AAAI)}},
  number       = {{12}},
  pages        = {{13246--13255}},
  title        = {{{Approximating the shapley value without marginal contributions}}},
  volume       = {{38}},
  year         = {{2024}},
}

@inproceedings{61180,
  abstract     = {{Starting from the assumption that LLMs are systems bearing only formal but not functional linguistic competence, this short paper explores how the understanding capabilities of LLMs could be implicitly explained based on a “pause and reflect” strategy. Specifically, we propose to include a virtual embodied agent in human interactions with LLM-based chatbots. The agent will use air quotes as multimodal metalinguistic markers to explicitly point to those parts of the LLM’s output that are relevant to explaining the LLM’s meaning understanding capabilities. At the same time, by scaffolding users to perceive the output as ‘mentioned language’ inferred from a metalinguistic function of multimodal markers, the agent implicitly explains how the meaning of the output should be understood. In this proposal, users will actively participate in the co-construction of the implicit explanation by providing feedback and deciding when and to what extent the agent’s scaffold (e.g., the air quotes) is used.}},
  author       = {{Belosevic, Milena and Buschmeier, Hendrik}},
  booktitle    = {{ICMI Companion ’24: Companion Proceedings of the 26th International Conference on Multimodal Interaction}},
  location     = {{San José, Costa Rica}},
  pages        = {{225--227}},
  publisher    = {{ACM}},
  title        = {{{Quote to explain: Using multimodal metalinguistic markers to explain large language models’ understanding capabilities}}},
  doi          = {{10.1145/3686215.3689203}},
  year         = {{2024}},
}

@inproceedings{61176,
  abstract     = {{We revisit the phenomenon of syntactic complexity convergence in conversational interaction, originally found for English dialogue, which has theoretical implication for dialogical concepts such as mutual understanding. We use a modified metric to quantify syntactic complexity based on dependency parsing. The results show that syntactic complexity convergence can be statistically confirmed in one of three selected German datasets that were analysed. Given that the dataset which shows such convergence is much larger than the other two selected datasets, the empirical results indicate a certain degree of linguistic generality of syntactic complexity convergence in conversational interaction. We also found a different type of syntactic complexity convergence in one of the datasets while further investigation is still necessary.}},
  author       = {{Wang, Yu and Buschmeier, Hendrik}},
  booktitle    = {{Proceedings of the 20th Conference on Natural Language Processing (KONVENS 2024)}},
  location     = {{Vienna, Austria}},
  pages        = {{75--80}},
  title        = {{{Revisiting the phenomenon of syntactic complexity convergence on German dialogue data}}},
  year         = {{2024}},
}

