@inbook{61323,
  author       = {{Wrede, Britta and Buschmeier, Hendrik and Rohlfing, Katharina Justine and Booshehri, Meisam and Grimminger, Angela}},
  booktitle    = {{Social Explainable AI}},
  editor       = {{Rohlfing, Katharina J. and Främling, Kary and Alpsancar, Suzana and Thommes, Kirsten and Lim, Brian Y.}},
  pages        = {{227--245}},
  publisher    = {{Springer}},
  title        = {{{Incremental communication}}},
  doi          = {{10.1007/978-981-96-5290-7_12}},
  year         = {{2026}},
}

@inbook{61325,
  author       = {{Vollmer, Anna-Lisa and Buhl, Heike M. and Alami, Rachid and Främling, Kary and Grimminger, Angela and Booshehri, Meisam and Ngonga Ngomo, Axel-Cyrille}},
  booktitle    = {{Social Explainable AI}},
  editor       = {{Rohlfing, Katharina J. and Främling, Kary and Lim, Brian Y. and Alpsancar, Suzana and Thommes, Kirsten}},
  pages        = {{39--53}},
  publisher    = {{Springer}},
  title        = {{{Components of an explanation for co-constructive sXAI}}},
  doi          = {{10.1007/978-981-96-5290-7_3}},
  year         = {{2026}},
}

@inbook{65083,
  author       = {{Buhl, Heike M. and Wrede, Britta and Fisher, Josephine Beryl and Matarese, Marco}},
  booktitle    = {{Social Explainable AI}},
  editor       = {{Rohlfing, Katharina J. and Främling, Kary and Lim, Brian Y. and Alpsancar, Suzana and Thommes, Kirsten}},
  pages        = {{247--267}},
  publisher    = {{Springer}},
  title        = {{{Adaptation}}},
  doi          = {{10.1007/978-981-96-5290-7_13}},
  year         = {{2026}},
}

@inbook{61150,
  abstract     = {{Since the emergence of the field of eXplainable Artificial Intelligence (XAI), a growing number of researchers have argued that XAI should consider insights from the social sciences in order to adapt explanations to the expectations and needs of human users. This has led to the emergence of a field called Social XAI, which is concerned with understanding how explanations are actively shaped in the interaction between a human user and an AI system. Recognizing this turn in XAI toward making XAI systems more “social” by providing explanations that focus on human information needs and incorporating insights from human–human explanatory interactions, in this paper we provide a formal foundation for Social XAI. We do so by proposing novel ontological accounts of the key terms used in Social XAI based on Basic Formal Ontology (BFO). Specifically, we provide novel ontological accounts for explanandum, explanans, understanding, explanation, explainer, explainee, and context. In doing so, we discuss multifaceted entities in Social XAI (having both continuant and occurrent facets; e.g., explanation) and the relationship between understanding and explanation. Additionally, we propose solutions to seemingly paradoxical views on some terms (e.g., social constructivist vs. individual constructivist perspective on explanandum).}},
  author       = {{Booshehri, Meisam and Buschmeier, Hendrik and Cimiano, Philipp}},
  booktitle    = {{Proceedings of the 15th International Conference on Formal Ontology in Information Systems}},
  isbn         = {{9781643686172}},
  issn         = {{0922-6389}},
  location     = {{Catania, Italy}},
  pages        = {{255--268}},
  publisher    = {{IOS Press}},
  title        = {{{A BFO-based ontological analysis of entities in Social XAI}}},
  doi          = {{10.3233/FAIA250498}},
  year         = {{2025}},
}

@inproceedings{61153,
  author       = {{Booshehri, Meisam and Buschmeier, Hendrik and Cimiano, Philipp}},
  booktitle    = {{Abstracts of the 3rd TRR 318 Conference: Contextualizing Explanations}},
  location     = {{Bielefeld, Germany}},
  title        = {{{A BFO-based ontology of context for Social XAI}}},
  year         = {{2025}},
}

@inproceedings{61234,
  abstract     = {{The ability to generate explanations that are understood by explainees is the quintessence of explainable artificial intelligence. Since understanding depends on the explainee's background and needs, recent research focused on co-constructive explanation dialogues, where an explainer continuously monitors the explainee's understanding and adapts their explanations dynamically. We investigate the ability of large language models (LLMs) to engage as explainers in co-constructive explanation dialogues. In particular, we present a user study in which explainees interact with an LLM in two settings, one of which involves the LLM being instructed to explain a topic co-constructively. We evaluate the explainees' understanding before and after the dialogue, as well as their perception of the LLMs' co-constructive behavior. Our results suggest that LLMs show some co-constructive behaviors, such as asking verification questions, that foster the explainees' engagement and can improve understanding of a topic. However, their ability to effectively monitor the current understanding and scaffold the explanations accordingly remains limited.}},
  author       = {{Fichtel, Leandra and Spliethöver, Maximilian and Hüllermeier, Eyke and Jimenez, Patricia and Klowait, Nils and Kopp, Stefan and Ngonga Ngomo, Axel-Cyrille and Robrecht, Amelie and Scharlau, Ingrid and Terfloth, Lutz and Vollmer, Anna-Lisa and Wachsmuth, Henning}},
  booktitle    = {{Proceedings of the 26th Annual Meeting of the Special Interest Group on Discourse and Dialogue}},
  publisher    = {{Association for Computational Linguistics}},
  title        = {{{Investigating Co-Constructive Behavior of Large Language Models in Explanation Dialogues}}},
  year         = {{2025}},
}

@inproceedings{59856,
  abstract     = {{Recent advances on instruction fine-tuning have led to the development of various prompting techniques for large language models, such as explicit reasoning steps. However, the success of techniques depends on various parameters, such as the task, language model, and context provided. Finding an effective prompt is, therefore, often a trial-and-error process. Most existing approaches to automatic prompting aim to optimize individual techniques instead of compositions of techniques and their dependence on the input. To fill this gap, we propose an adaptive prompting approach that predicts the optimal prompt composition ad-hoc for a given input. We apply our approach to social bias detection, a highly context-dependent task that requires semantic understanding. We evaluate it with three large language models on three datasets, comparing compositions to individual techniques and other baselines. The results underline the importance of finding an effective prompt composition. Our approach robustly ensures high detection performance, and is best in several settings. Moreover, first experiments on other tasks support its generalizability.}},
  author       = {{Spliethöver, Maximilian and Knebler, Tim and Fumagalli, Fabian and Muschalik, Maximilian and Hammer, Barbara and Hüllermeier, Eyke and Wachsmuth, Henning}},
  booktitle    = {{Proceedings of the 2025 Conference of the Nations of the Americas Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers)}},
  editor       = {{Chiruzzo, Luis and Ritter, Alan and Wang, Lu}},
  isbn         = {{979-8-89176-189-6}},
  pages        = {{2421--2449}},
  publisher    = {{Association for Computational Linguistics}},
  title        = {{{Adaptive Prompting: Ad-hoc Prompt Composition for Social Bias Detection}}},
  year         = {{2025}},
}

@inproceedings{55404,
  abstract     = {{Explanations are pervasive in our lives. Mostly, they occur in dialogical form where an explainer discusses a concept or phenomenon of interest with an explainee. Leaving the explainee with a clear understanding is not straightforward due to the knowledge gap between the two participants. Previous research looked at the interaction of explanation moves, dialogue acts, and topics in successful dialogues with expert explainers. However, daily-life explanations often fail, raising the question of what makes a dialogue successful. In this work, we study explanation dialogues in terms of the interactions between the explainer and explainee and how they correlate with the quality of explanations in terms of a successful understanding on the explainee's side. In particular, we first construct a corpus of 399 dialogues from the Reddit forum Explain Like I am Five and annotate it for interaction flows and explanation quality. We then analyze the interaction flows, comparing them to those appearing in expert dialogues. Finally, we encode the interaction flows using two language models that can handle long inputs, and we provide empirical evidence for the effectiveness boost gained through the encoding in predicting the success of explanation dialogues.}},
  author       = {{Alshomary, Milad and Lange, Felix and Booshehri, Meisam and Sengupta, Meghdut and Cimiano, Philipp and Wachsmuth, Henning}},
  booktitle    = {{Proceedings of the 2024 Joint International Conference on Computational Linguistics, Language Resources and Evaluation (LREC-COLING 2024)}},
  editor       = {{Calzolari, Nicoletta and Kan, Min-Yen and Hoste, Veronique and Lenci, Alessandro and Sakti, Sakriani and Xue, Nianwen}},
  pages        = {{11523--11536}},
  publisher    = {{ELRA and ICCL}},
  title        = {{{Modeling the Quality of Dialogical Explanations}}},
  year         = {{2024}},
}

@inproceedings{55403,
  abstract     = {{In this paper we consider the interactive processes by which an explainer and an explainee cooperate to produce an explanation, which we refer to as co-construction. Explainable Artificial Intelligence (XAI) is concerned with the development of intelligent systems and robots that can explain and justify their actions, decisions, recommendations, and so on. However, the cooperative construction of explanations remains a key but under-explored issue. This short paper proposes an architecture for intelligent systems that promotes a co-constructive and interactive approach to explanation generation. By outlining its basic components and their specific roles, we aim to contribute to the advancement of XAI computational frameworks that actively engage users in the explanation process.}},
  author       = {{Buschmeier, Hendrik and Cimiano, Philipp and Kopp, Stefan and Kornowicz, Jaroslaw and Lammert, Olesja and Matarese, Marco and Mindlin, Dimitry and Robrecht, Amelie Sophie and Vollmer, Anna-Lisa and Wagner, Petra and Wrede, Britta and Booshehri, Meisam}},
  booktitle    = {{Proceedings of the 2024 Workshop on Explainability Engineering}},
  location     = {{Lisbon, Portugal}},
  pages        = {{20--25}},
  publisher    = {{ACM}},
  title        = {{{Towards a Computational Architecture for Co-Constructive Explainable Systems}}},
  doi          = {{10.1145/3648505.3648509}},
  year         = {{2024}},
}

@inproceedings{55917,
  abstract     = {{This work takes steps towards situating the concepts relevant to explanation and understanding in explanatory interactions within the scope of Basic Formal Ontology. We introduce novel ontological accounts of understanding and explanation in BFO-terms, which foster a shared conceptualization of explanations and explainee's understanding during explainer-explainee interactions. This approach also enables the tracking of different aspects of understanding and explanation through cognitive profiling of various measurable aspects under the heading of process profile in BFO. Additionally, we differentiate between the private mental process of understanding and understanding displays. Finally, we characterize the relationship between understanding displays and explanations.}},
  author       = {{Booshehri, Meisam and Buschmeier, Hendrik and Cimiano, Philipp}},
  booktitle    = {{Proceedings of the 4th International Workshop on Data Meets Applied Ontologies in Explainable AI (DAO-XAI)}},
  issn         = {{1613-0073}},
  location     = {{Santiago de Compostela, Spain}},
  publisher    = {{International Association for Ontology and its Applications}},
  title        = {{{Towards a BFO-based ontology of understanding in explanatory interactions}}},
  year         = {{2024}},
}

@inproceedings{55916,
  abstract     = {{To produce explanations that are more likely to be accepted by humans, Explainable Artificial Intelligence (XAI) systems need to incorporate explanation models grounded in human communication patterns. So far, little is known about how an explainee, who lacks understanding of an issue, and an explainer, who has knowledge to fill the explainee's knowledge gap, actively shape an explanation process, and how their involvement relates to explanatory success in terms of maximizing the explainee's level of understanding. In this paper, we characterize explanations as dialogues in which explainee and explainer take turns to advance the explanation process. We build on an existing annotation scheme of ‘explanatory moves’ to characterize such turns, and manually annotate 362 dialogical explanations from the “Explain Like I'm Five” subreddit. Building on the annotated data, we compute correlations between explanatory moves and explanatory success, measured on a five-point Likert scale, in order to identify factors that are significantly correlated with explanatory success. Based on a qualitative analysis of these factors, we develop a conceptual model of the main factors that contribute to the success of explanatory dialogues.}},
  author       = {{Booshehri, Meisam and Buschmeier, Hendrik and Cimiano, Philipp}},
  booktitle    = {{Proceedings of the 26th ACM International Conference on Multimodal Interaction}},
  location     = {{San José, Costa Rica}},
  pages        = {{373--381}},
  publisher    = {{ACM}},
  title        = {{{A model of factors contributing to the success of dialogical explanations}}},
  doi          = {{10.1145/3678957.3685744}},
  year         = {{2024}},
}

@inproceedings{58722,
  abstract     = {{Dialects introduce syntactic and lexical variations in language that occur in regional or social groups. Most NLP methods are not sensitive to such variations. This may lead to unfair behavior of the methods, conveying negative bias towards dialect speakers. While previous work has studied dialect-related fairness for aspects like hate speech, other aspects of biased language, such as lewdness, remain fully unexplored. To fill this gap, we investigate performance disparities between dialects in the detection of five aspects of biased language and how to mitigate them. To alleviate bias, we present a multitask learning approach that models dialect language as an auxiliary task to incorporate syntactic and lexical variations. In our experiments with African-American English dialect, we provide empirical evidence that complementing common learning approaches with dialect modeling improves their fairness. Furthermore, the results suggest that multitask learning achieves state-of-the-art performance and helps to detect properties of biased language more reliably.}},
  author       = {{Spliethöver, Maximilian and Menon, Sai Nikhil and Wachsmuth, Henning}},
  booktitle    = {{Findings of the Association for Computational Linguistics: ACL 2024}},
  editor       = {{Ku, Lun-Wei and Martins, Andre and Srikumar, Vivek}},
  pages        = {{9294--9313}},
  publisher    = {{Association for Computational Linguistics}},
  title        = {{{Disentangling Dialect from Social Bias via Multitask Learning to Improve Fairness}}},
  doi          = {{10.18653/v1/2024.findings-acl.553}},
  year         = {{2024}},
}

@inproceedings{58723,
  abstract     = {{In real-world debates, the most common way to counter an argument is to reason against its main point, that is, its conclusion. Existing work on the automatic generation of natural language counter-arguments does not address the relation to the conclusion, possibly because many arguments leave their conclusion implicit. In this paper, we hypothesize that the key to effective counter-argument generation is to explicitly model the argument‘s conclusion and to ensure that the stance of the generated counter is opposite to that conclusion. In particular, we propose a multitask approach that jointly learns to generate both the conclusion and the counter of an input argument. The approach employs a stance-based ranking component that selects the counter from a diverse set of generated candidates whose stance best opposes the generated conclusion. In both automatic and manual evaluation, we provide evidence that our approach generates more relevant and stance-adhering counters than strong baselines.}},
  author       = {{Alshomary, Milad and Wachsmuth, Henning}},
  booktitle    = {{Proceedings of the 17th Conference of the European Chapter of the Association for Computational Linguistics}},
  editor       = {{Vlachos, Andreas and Augenstein, Isabelle}},
  pages        = {{957--967}},
  publisher    = {{Association for Computational Linguistics}},
  title        = {{{Conclusion-based Counter-Argument Generation}}},
  doi          = {{10.18653/v1/2023.eacl-main.67}},
  year         = {{2023}},
}

@inproceedings{55337,
  abstract     = {{As AI is more and more pervasive in everyday life, humans have an increasing demand to understand its behavior and decisions. Most research on explainable AI builds on the premise that there is one ideal explanation to be found. In fact, however, everyday explanations are co-constructed in a dialogue between the person explaining (the explainer) and the specific person being explained to (the explainee). In this paper, we introduce a first corpus of dialogical explanations to enable NLP research on how humans explain as well as on how AI can learn to imitate this process. The corpus consists of 65 transcribed English dialogues from the Wired video series 5 Levels, explaining 13 topics to five explainees of different proficiency. All 1550 dialogue turns have been manually labeled by five independent professionals for the topic discussed as well as for the dialogue act and the explanation move performed. We analyze linguistic patterns of explainers and explainees, and we explore differences across proficiency levels. BERT-based baseline results indicate that sequence information helps predicting topics, acts, and moves effectively.}},
  author       = {{Wachsmuth, Henning and Alshomary, Milad}},
  booktitle    = {{Proceedings of the 29th International Conference on Computational Linguistics}},
  editor       = {{Calzolari, Nicoletta and Huang, Chu-Ren and Kim, Hansaem and Pustejovsky, James and Wanner, Leo and Choi, Key-Sun and Ryu, Pum-Mo and Chen, Hsin-Hsi and Donatelli, Lucia and Ji, Heng and Kurohashi, Sadao and Paggio, Patrizia and Xue, Nianwen and Kim, Seokhwan and Hahm, Younggyun and He, Zhong and Lee, Tony Kyungil and Santus, Enrico and Bond, Francis and Na, Seung-Hoon}},
  pages        = {{344--354}},
  publisher    = {{International Committee on Computational Linguistics}},
  title        = {{{“Mama Always Had a Way of Explaining Things So I Could Understand”: A Dialogue Corpus for Learning to Construct Explanations}}},
  year         = {{2022}},
}

@inproceedings{32247,
  author       = {{Alshomary, Milad and Rieskamp, Jonas and Wachsmuth, Henning}},
  booktitle    = {{Proceedings of the 9th International Conference on Computational Models of Argument}},
  pages        = {{21--31}},
  title        = {{{Generating Contrastive Snippets for Argument Search}}},
  doi          = {{10.3233/FAIA220138}},
  year         = {{2022}},
}

@inproceedings{30840,
  author       = {{Alshomary, Milad and El Baff, Roxanne and Gurcke, Timon and Wachsmuth, Henning}},
  booktitle    = {{Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics}},
  pages        = {{8782--8797}},
  title        = {{{The Moral Debater: A Study on the Computational Generation of Morally Framed Arguments}}},
  year         = {{2022}},
}

