@inbook{59581,
  author       = {{Häsel-Weide, Uta and Nührenbörger, M.}},
  booktitle    = {{Beiträge zum Mathematikunterricht 2024. 57. Jahrestagung der Gesellschaft für Didaktik der Mathematik}},
  editor       = {{Ebers, P. and Rösken, F. and Barzel, B. and Büchter, A. and Schacht, F. and Scherer, P.}},
  pages        = {{207--210}},
  title        = {{{Praktiken der Förderung im inklusiven Mathematikunterricht}}},
  doi          = {{10.37626/GA9783959872782.0}},
  year         = {{2024}},
}

@article{50009,
  abstract     = {{In the past decades, the notion of voice in the theorizing and teaching of academic writing has been the subject of much debate and conceptual change, especially concerning its relation to writer identity. Many newer accounts of voice and identity in academic writing draw on the dialogical concept of voice by Bakhtin. However, some theoretical and methodological inconsistencies have surfaced in the adaptions of the concept. Working from a refinement of the dialogical notion of voice based on the concepts of polyphony and interiorization, this article presents a methodological approach for analyzing voice(s) in writing. The article presents material around the evolution of an early-career researcher’s dissertation synopsis. The material is multilayered, including the writer’s text, transcripts from an interdisciplinary peer-feedback conversation with two colleagues, and a video-stimulated interview with the writer. Excerpts of the material were analyzed to trace the polyphony of interiorized voices that influenced the writing. This focus revealed the multivoicedness of academic texts as an effect of their history of coming into being. This article contributes to the question of voice and identity in academic writing from a dialogical psycholinguistic perspective by presenting a de-reifying notion of voice grounded in an understanding of writing as a polyphonic activity, which also feeds into the formation of a writer’s self.}},
  author       = {{Karsten, Andrea}},
  issn         = {{0741-0883}},
  journal      = {{Written Communication}},
  keywords     = {{Literature and Literary Theory, Communication}},
  number       = {{1}},
  pages        = {{6--36}},
  publisher    = {{SAGE Publications}},
  title        = {{{Voices in Dialogue: Taking Polyphony in Academic Writing Seriously}}},
  doi          = {{10.1177/07410883231207104}},
  volume       = {{41}},
  year         = {{2024}},
}

@inbook{61166,
  author       = {{Dahl, Stefanie and Aschebrock, Kathrin}},
  booktitle    = {{Wissenstransfer in der Sportpädagogik}},
  editor       = {{Neuber, Nils}},
  isbn         = {{9783658436216}},
  issn         = {{2512-0697}},
  pages        = {{153--170}},
  publisher    = {{Springer Fachmedien Wiesbaden}},
  title        = {{{Forschungsverbund Kinder- und Jugendsport NRW – Transferformate für den Dialog zwischen Wissenschaft und Gesellschaft}}},
  doi          = {{10.1007/978-3-658-43622-3_10}},
  volume       = {{34}},
  year         = {{2024}},
}

@article{61172,
  author       = {{Coy, Sam and Czumaj, Artur and Scheideler, Christian and Schneider, Philipp and Werthmann, Julian}},
  issn         = {{0304-3975}},
  journal      = {{Theoretical Computer Science}},
  publisher    = {{Elsevier BV}},
  title        = {{{Routing Schemes for Hybrid Communication Networks}}},
  doi          = {{10.1016/j.tcs.2023.114352}},
  volume       = {{985}},
  year         = {{2024}},
}

@inbook{61163,
  author       = {{Herzig, Bardo and Losch, Daniel}},
  booktitle    = {{Fragmentierung in der Lehrkräftebildung - Das Lehramtsstudium im Spannungsfeld von Professionsorientierung, Bildungstheorie und (Fach-)Wissenschaft}},
  editor       = {{Gräf, Anne and Helling, Simon and Losch, Daniel and Polcik, Thassilo and Rojahn, Pia and Wendland, Sebastian}},
  isbn         = {{978-3-7560-1473-6}},
  pages        = {{289--316}},
  publisher    = {{Nomos Verlagsgesellschaft mbH \& Co. KG}},
  title        = {{{Informatische Literalität und Medienbildung im Handeln von Lehrkräften}}},
  volume       = {{1}},
  year         = {{2024}},
}

@inbook{61188,
  author       = {{Schulze, Johanna and Herzig, Bardo and Lehberger, Regine}},
  booktitle    = {{Lehrkräftebildung in der digitalen Welt. Zukunftsorientierte Forschungs- und Praxisperspektiven}},
  editor       = {{Herzig, Bardo and Eickelmann, Birgit and Schwabl, Franziska and Schulze, Johanna and Niemann, Jan}},
  isbn         = {{978-3-8309-4837-7}},
  issn         = {{2944-6791}},
  pages        = {{75--83}},
  publisher    = {{Waxmann}},
  title        = {{{Agile Gestaltung digitalisierungsbezogener Schulentwicklung in der Lehrkräftebildung}}},
  doi          = {{10.31244/9783830998372}},
  volume       = {{1}},
  year         = {{2024}},
}

@inproceedings{56983,
  abstract     = {{Detecting the veracity of a statement automatically is a challenge the world is grappling with due to the vast amount of data spread across the web. Verifying a given claim typically entails validating it within the framework of supporting evidence like a retrieved piece of text. Classifying the stance of the text with respect to the claim is called stance classification. Despite advancements in automated fact-checking, most systems still rely on a substantial quantity of labeled training data, which can be costly. In this work, we avoid the costly training or fine-tuning of models by reusing pre-trained large language models together with few-shot in-context learning. Since we do not train any model, our approach ExPrompt is lightweight, demands fewer resources than other stance classification methods and can serve as a modern baseline for future developments. At the same time, our evaluation shows that our approach is able to outperform former state-of-the-art stance classification approaches regarding accuracy by at least 2 percent. Our scripts and data used in this paper are available at https://github.com/dice-group/ExPrompt.}},
  author       = {{Qudus, Umair and Röder, Michael and Vollmers, Daniel and Ngonga Ngomo, Axel-Cyrille}},
  booktitle    = {{Proceedings of the 33rd ACM International Conference on Information and Knowledge Management}},
  isbn         = {{979-8-4007-0436-9}},
  keywords     = {{Stance Classification, Few-shot in-context learning, Pre-trained large language models}},
  location     = {{Boise, ID, USA}},
  pages        = {{3994--3999}},
  publisher    = {{ACM}},
  title        = {{{ExPrompt: Augmenting Prompts Using Examples as Modern Baseline for Stance Classification}}},
  doi          = {{10.1145/3627673.3679923}},
  volume       = {{9}},
  year         = {{2024}},
}

@inproceedings{57240,
  abstract     = {{Validating assertions before adding them to a knowledge graph is an essential part of its creation and maintenance. Due to the sheer size of knowledge graphs, automatic fact-checking approaches have been developed. These approaches rely on reference knowledge to decide whether a given assertion is correct. Recent hybrid approaches achieve good results by including several knowledge sources. However, it is often impractical to provide a sheer quantity of textual knowledge or generate embedding models to leverage these hybrid approaches. We present FaVEL, an approach that uses algorithm selection and ensemble learning to amalgamate several existing fact-checking approaches that rely solely on a reference knowledge graph and, hence, use fewer resources than current hybrid approaches. For our evaluation, we create updated versions of two existing datasets and a new dataset dubbed FaVEL-DS. Our evaluation compares our approach to 15 fact-checking approaches—including the state-of-the-art approach HybridFC—on 3 datasets. Our results demonstrate that FaVEL outperforms all other approaches significantly by at least 0.04 in terms of the area under the ROC curve. Our source code, datasets, and evaluation results are open-source and can be found at https://github.com/dice-group/favel.}},
  author       = {{Qudus, Umair and Röder, Michael and Tatkeu Pekarou, Franck Lionel and Morim da Silva, Ana Alexandra and Ngonga Ngomo, Axel-Cyrille}},
  booktitle    = {{EKAW 2024}},
  editor       = {{Rospocher, Marco}},
  keywords     = {{fact checking, ensemble learning, transfer learning, knowledge management}},
  location     = {{Amsterdam, Netherlands}},
  title        = {{{FaVEL: Fact Validation Ensemble Learning}}},
  year         = {{2024}},
}

@article{61197,
  author       = {{Herzig, Bardo}},
  journal      = {{Plan BD. Online-Magazin für Schule in der Kultur der Digitalität}},
  title        = {{{KI-bezogene Kompetenzen von Lehrkräften}}},
  url          = {{https://magazin.forumbd.de/lehren-und-lernen/ki-bezogene-kompetenzen-von-lehrkraeften/}},
  year         = {{2024}},
}

@inproceedings{58377,
  abstract     = {{The connection between inconsistent databases and Dung's abstract
argumentation framework has recently drawn growing interest. Specifically, an
inconsistent database, involving certain types of integrity constraints such as
functional and inclusion dependencies, can be viewed as an argumentation
framework in Dung's setting. Nevertheless, no prior work has explored the exact
expressive power of Dung's theory of argumentation when compared to
inconsistent databases and integrity constraints. In this paper, we close this
gap by arguing that an argumentation framework can also be viewed as an
inconsistent database. We first establish a connection between subset-repairs
for databases and extensions for AFs, considering conflict-free, naive,
admissible, and preferred semantics. Further, we define a new family of
attribute-based repairs based on the principle of maximal content preservation.
The effectiveness of these repairs is then highlighted by connecting them to
stable, semi-stable, and stage semantics. Our main contributions include
translating an argumentation framework into a database together with integrity
constraints. Moreover, this translation can be achieved in polynomial time,
which is essential in transferring complexity results between the two
formalisms.}},
  author       = {{Mahmood, Yasir and Hecher, Markus and Ngonga Ngomo, Axel-Cyrille}},
  title        = {{{Dung's Argumentation Framework: Unveiling the Expressive Power with Inconsistent Databases}}},
  doi          = {{10.1609/aaai.v39i14.33651}},
  year         = {{2024}},
}

@inproceedings{61179,
  abstract     = {{We examine how users perceive the limitations of an AI system when it encounters a task that it cannot perform perfectly and whether providing explanations alongside its answers aids users in constructing an appropriate mental model of the system's capabilities and limitations. We employ a visual question answer and explanation task where we control the AI system's limitations by manipulating the visual inputs: during inference, the system either processes full-color or grayscale images. Our goal is to determine whether participants can perceive the limitations of the system. We hypothesize that explanations will make limited AI capabilities more transparent to users. However, our results show that explanations do not have this effect. Instead of allowing users to more accurately assess the limitations of the AI system, explanations generally increase users' perceptions of the system's competence – regardless of its actual performance.}},
  author       = {{Sieker, Judith and Junker, Simeon and Utescher, Ronja and Attari, Nazia and Wersing, Heiko and Buschmeier, Hendrik and Zarrieß, Sina}},
  booktitle    = {{Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing}},
  location     = {{Miami, FL, USA}},
  pages        = {{19459--19475}},
  publisher    = {{ACL}},
  title        = {{{The illusion of competence: Evaluating the effect of explanations on users’ mental models of visual question answering systems}}},
  doi          = {{10.18653/v1/2024.emnlp-main.1084}},
  year         = {{2024}},
}

@inbook{57238,
  abstract     = {{Abstract argumentation is a popular toolkit for modeling, evaluating, and comparing arguments. Relationships between arguments are specified in argumentation frameworks (AFs), and conditions are placed on sets (extensions) of arguments that allow AFs to be evaluated. For more expressiveness, AFs are augmented with acceptance conditions on directly interacting arguments or a constraint on the admissible sets of arguments, resulting in dialectic frameworks or constrained argumentation frameworks. In this paper, we consider flexible conditions for rejecting an argument from an extension, which we call rejection conditions (RCs). On the technical level, we associate each argument with a specific logic program. We analyze the resulting complexity, including the structural parameter treewidth. Rejection AFs are highly expressive, giving rise to natural problems on higher levels of the polynomial hierarchy.}},
  author       = {{Fichte, Johannes K. and Hecher, Markus and Mahmood, Yasir and Meier, Arne}},
  booktitle    = {{Frontiers in Artificial Intelligence and Applications}},
  isbn         = {{9781643685489}},
  issn         = {{0922-6389}},
  location     = {{Santiago de Compostela, Spain}},
  publisher    = {{IOS Press}},
  title        = {{{Rejection in Abstract Argumentation: Harder Than Acceptance?}}},
  doi          = {{10.3233/faia240867}},
  year         = {{2024}},
}

@inproceedings{55655,
  abstract     = {{Argumentation is a well-established formalism for nonmonotonic reasoning, with popular frameworks being Dung’s abstract argumentation (AFs) or logic-based argumentation (Besnard-Hunter’s framework). Structurally, a set of formulas forms support for a claim if it is consistent, subset-minimal, and implies the claim. Then, an argument comprises support and a claim. We observe that the computational task (ARG) of asking for support of a claim in a knowledge base is “brave”, since many claims with a single support are accepted. As a result, ARG falls short when it comes to the question of confidence in a claim, or claim strength. In this paper, we propose a concept for measuring the (acceptance) strength of claims, based on counting supports for a claim. Further, we settle classical and structural complexity of counting arguments favoring a given claim in propositional knowledge bases (KBs). We introduce quantitative reasoning to measure the strength of claims in a KB and to determine the relevance strength of a formula for a claim.}},
  author       = {{Hecher, Markus and Mahmood, Yasir and Meier, Arne and Schmidt, Johannes}},
  booktitle    = {{Proceedings of the Thirty-Third International Joint Conference on Artificial Intelligence}},
  publisher    = {{International Joint Conferences on Artificial Intelligence Organization}},
  title        = {{{Quantitative Claim-Centric Reasoning in Logic-Based Argumentation}}},
  doi          = {{10.24963/ijcai.2024/377}},
  year         = {{2024}},
}

@unpublished{57814,
  abstract     = {{We study consistent query answering via different graph representations.
First, we introduce solution-conflict hypergraphs in which nodes represent
facts and edges represent either conflicts or query solutions. Considering a
monotonic query and a set of antimonotonic constraints, we present an explicit
algorithm for counting the number of repairs satisfying the query based on a
tree decomposition of the solution-conflict hypergraph. The algorithm not only
provides fixed-parameter tractability results for data complexity over
expressive query and constraint classes, but also introduces a novel and
potentially implementable approach to repair counting. Second, we consider the
Gaifman graphs arising from MSO descriptions of consistent query answering.
Using a generalization of Courcelle's theorem, we then present fixed-parameter
tractability results for combined complexity over expressive query and
constraint classes.}},
  author       = {{Hankala, Teemu and Hannula, Miika and Mahmood, Yasir and Meier, Arne}},
  eprint       = {{2412.08324}},
  eprinttype   = {{arXiv}},
  note         = {{arXiv:2412.08324}},
  title        = {{{Parameterised Complexity of Consistent Query Answering via Graph Representations}}},
  year         = {{2024}},
}

@inproceedings{55917,
  abstract     = {{This work takes steps towards situating the concepts relevant to explanation and understanding in explanatory interactions within the scope of Basic Formal Ontology. We introduce novel ontological accounts of understanding and explanation in BFO-terms, which foster a shared conceptualization of explanations and explainee's understanding during explainer-explainee interactions. This approach also enables the tracking of different aspects of understanding and explanation through cognitive profiling of various measurable aspects under the heading of process profile in BFO. Additionally, we differentiate between the private mental process of understanding and understanding displays. Finally, we characterize the relationship between understanding displays and explanations.}},
  author       = {{Booshehri, Meisam and Buschmeier, Hendrik and Cimiano, Philipp}},
  booktitle    = {{Proceedings of the 4th International Workshop on Data Meets Applied Ontologies in Explainable AI (DAO-XAI)}},
  issn         = {{1613-0073}},
  location     = {{Santiago de Compostela, Spain}},
  publisher    = {{International Association for Ontology and its Applications}},
  title        = {{{Towards a BFO-based ontology of understanding in explanatory interactions}}},
  year         = {{2024}},
}

@inproceedings{61177,
  abstract     = {{Human and model-generated texts can be distinguished by examining the magnitude of likelihood in language. However, it is becoming increasingly difficult as language model's capabilities of generating human-like texts keep evolving. This study provides a new perspective by using the relative likelihood values instead of absolute ones, and extracting useful features from the spectrum-view of likelihood for the human-model text detection task. We propose a detection procedure with two classification methods, supervised and heuristic-based, respectively, which results in competitive performances with previous zero-shot detection methods and a new state-of-the-art on short-text detection. Our method can also reveal subtle differences between human and model languages, which find theoretical roots in psycholinguistics studies.}},
  author       = {{Xu, Yang and Wang, Yu and An, Hao and Liu, Zhichen and Li, Yongyuan}},
  booktitle    = {{Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing}},
  location     = {{Miami, FL, USA}},
  pages        = {{10108--10121}},
  publisher    = {{ACL}},
  title        = {{{Detecting subtle differences between human and model languages using spectrum of relative likelihood}}},
  doi          = {{10.18653/v1/2024.emnlp-main.564}},
  year         = {{2024}},
}

@inproceedings{56985,
  author       = {{Buschmeier, Hendrik and Kopp, Stefan and Hassan, Teena}},
  booktitle    = {{Proceedings of the 26th ACM International Conference on Multimodal Interaction}},
  location     = {{San José, Costa Rica}},
  pages        = {{698--699}},
  publisher    = {{ACM}},
  title        = {{{Multimodal Co-Construction of Explanations with XAI Workshop}}},
  doi          = {{10.1145/3678957.3689205}},
  year         = {{2024}},
}

@inproceedings{55916,
  abstract     = {{To produce explanations that are more likely to be accepted by humans, Explainable Artificial Intelligence (XAI) systems need to incorporate explanation models grounded in human communication patterns. So far, little is known about how an explainee, who lacks understanding of an issue, and an explainer, who has knowledge to fill the explainee's knowledge gap, actively shape an explanation process, and how their involvement relates to explanatory success in terms of maximizing the explainee's level of understanding. In this paper, we characterize explanations as dialogues in which explainee and explainer take turns to advance the explanation process. We build on an existing annotation scheme of ‘explanatory moves’ to characterize such turns, and manually annotate 362 dialogical explanations from the “Explain Like I'm Five” subreddit. Building on the annotated data, we compute correlations between explanatory moves and explanatory success, measured on a five-point Likert scale, in order to identify factors that are significantly correlated with explanatory success. Based on a qualitative analysis of these factors, we develop a conceptual model of the main factors that contribute to the success of explanatory dialogues.}},
  author       = {{Booshehri, Meisam and Buschmeier, Hendrik and Cimiano, Philipp}},
  booktitle    = {{Proceedings of the 26th ACM International Conference on Multimodal Interaction}},
  location     = {{San José, Costa Rica}},
  pages        = {{373--381}},
  publisher    = {{ACM}},
  title        = {{{A model of factors contributing to the success of dialogical explanations}}},
  doi          = {{10.1145/3678957.3685744}},
  year         = {{2024}},
}

@inproceedings{55995,
  abstract     = {{Scripted dialogues such as movie and TV subtitles constitute a widespread source of training data for conversational NLP models. However, there are notable linguistic differences between these dialogues and spontaneous interactions, especially regarding the occurrence of communicative feedback such as backchannels, acknowledgments, or clarification requests. This paper presents a quantitative analysis of such feedback phenomena in both subtitles and spontaneous conversations. Based on conversational data spanning eight languages and multiple genres, we extract lexical statistics, classifications from a dialogue act tagger, expert annotations and labels derived from a fine-tuned Large Language Model (LLM). Our main empirical findings are that (1) communicative feedback is markedly less frequent in subtitles than in spontaneous dialogues and (2) subtitles contain a higher proportion of negative feedback. We also show that dialogues generated by standard LLMs lie much closer to scripted dialogues than spontaneous interactions in terms of communicative feedback.}},
  author       = {{Pilán, Ildikó and Prévot, Laurent and Buschmeier, Hendrik and Lison, Pierre}},
  booktitle    = {{Proceedings of the 25th Meeting of the Special Interest Group on Discourse and Dialogue}},
  location     = {{Kyoto, Japan}},
  pages        = {{440--457}},
  title        = {{{Conversational feedback in scripted versus spontaneous dialogues: A comparative analysis}}},
  doi          = {{10.18653/v1/2024.sigdial-1.38}},
  year         = {{2024}},
}

@inproceedings{55913,
  abstract     = {{We examined the turn-taking dynamics across different phases of explanatory dialogues, in which 21 different explainers explained a board game to 2–3 explainees each. Turn-taking dynamics are investigated focusing on >19K floor transitions, i.e., the detailed patterns characterizing turn keeping or turn yielding events (Gilmartin et al., 2020). The explanations were characterized by three different phases (board game absent, board game present, interactive game play), for which we observed differences in turn-taking dynamics: explanations where the board game is absent are characterized by less complex floor transitions, while explanations with a concretely shared reference space are characterized by more complex floor transitions, as well as more floor transitions between interlocutors. Also, the speakers’ dialogue role (explainer vs. explainee) appears to have a strong impact on turn-taking dynamics, as floor transitions that do not conform with the dialogue role tend to involve more effort, or floor management work.}},
  author       = {{Wagner, Petra and Włodarczak, Marcin and Buschmeier, Hendrik and Türk, Olcay and Gilmartin, Emer}},
  booktitle    = {{Proceedings of the 28th Workshop on the Semantics and Pragmatics of Dialogue}},
  issn         = {{2308-2275}},
  location     = {{Trento, Italy}},
  pages        = {{6--14}},
  title        = {{{Turn-taking dynamics across different phases of explanatory dialogues}}},
  year         = {{2024}},
}

