@inproceedings{61154,
  author       = {Türk, Olcay and Lazarov, Stefan Teodorov and Buschmeier, Hendrik and Wagner, Petra and Grimminger, Angela},
  booktitle    = {LingCologne 2025 – Book of Abstracts},
  location     = {Cologne, Germany},
  pages        = {36},
  title        = {Acoustic Detection of False Positive Backchannels of Understanding in Explanations},
  year         = {2025},
}

@inproceedings{62164,
  author       = {Kuhlmann, Michael and Seebauer, Fritz and Wagner, Petra and Häb-Umbach, Reinhold},
  booktitle    = {Interspeech 2025},
  publisher    = {ISCA},
  title        = {Towards Frame-Level Quality Predictions of Synthetic Speech},
  doi          = {10.21437/interspeech.2025-2190},
  year         = {2025},
}

@inproceedings{55403,
  abstract     = {In this paper we consider the interactive processes by which an explainer and an explainee cooperate to produce an explanation, which we refer to as co-construction. Explainable Artificial Intelligence (XAI) is concerned with the development of intelligent systems and robots that can explain and justify their actions, decisions, recommendations, and so on. However, the cooperative construction of explanations remains a key but under-explored issue. This short paper proposes an architecture for intelligent systems that promotes a co-constructive and interactive approach to explanation generation. By outlining its basic components and their specific roles, we aim to contribute to the advancement of XAI computational frameworks that actively engage users in the explanation process.},
  author       = {Buschmeier, Hendrik and Cimiano, Philipp and Kopp, Stefan and Kornowicz, Jaroslaw and Lammert, Olesja and Matarese, Marco and Mindlin, Dimitry and Robrecht, Amelie Sophie and Vollmer, Anna-Lisa and Wagner, Petra and Wrede, Britta and Booshehri, Meisam},
  booktitle    = {Proceedings of the 2024 Workshop on Explainability Engineering},
  location     = {Lisbon, Portugal},
  pages        = {20--25},
  publisher    = {ACM},
  title        = {Towards a Computational Architecture for Co-Constructive Explainable Systems},
  doi          = {10.1145/3648505.3648509},
  year         = {2024},
}

@inproceedings{55912,
  abstract     = {In explanatory interactions, explainees are expected to continuously provide feedback to explainers by signaling whether they understand an ongoing explanation. The study presented in this paper is based on the hypothesis that explainees use a set of multimodal cues, including vocalizations, facial expressions, and movements of the torso, head, and hands, to do so. We test this hypothesis by building a random forest classifier based on a multimodal corpus of dyadic explanations (21 explainers and explainees), in which windows of understanding or non-understanding were identified by participants in a retrospective video recall task. Results show that sequences of understanding can indeed be differentiated from those of non-understanding, and that a diverse set of predictors covering a wide range of modalities contributes to this classification. Due to data sparsity and a high degree of individual variation, the generalizability of our results is currently limited, but they support our hypothesis of the relevance of multimodal display in explanatory interactions.},
  author       = {Türk, Olcay and Lazarov, Stefan Teodorov and Wang, Yu and Buschmeier, Hendrik and Grimminger, Angela and Wagner, Petra},
  booktitle    = {Proceedings of the 26th {ACM} International Conference on Multimodal Interaction},
  location     = {San José, Costa Rica},
  pages        = {449--458},
  title        = {Predictability of Understanding in Explanatory Interactions Based on Multimodal Cues},
  doi          = {10.1145/3678957.3685741},
  year         = {2024},
}

@inproceedings{55913,
  abstract     = {We examined the turn-taking dynamics across different phases of explanatory dialogues, in which 21 different explainers explained a board game to 2–3 explainees each. Turn-taking dynamics are investigated focusing on $>$19K floor transitions, i.e., the detailed patterns characterizing turn keeping or turn yielding events (Gilmartin et al., 2020). The explanations were characterized by three different phases (board game absent, board game present, interactive game play), for which we observed differences in turn-taking dynamics: explanations where the board game is absent are characterized by less complex floor transitions, while explanations with a concretely shared reference space are characterized by more complex floor transitions, as well as more floor transitions between interlocutors. Also, the speakers’ dialogue role (explainer vs. explainee) appears to have a strong impact on turn-taking dynamics, as floor transitions that do not conform with the dialogue role tend to involve more effort, or floor management work.},
  author       = {Wagner, Petra and Włodarczak, Marcin and Buschmeier, Hendrik and Türk, Olcay and Gilmartin, Emer},
  booktitle    = {Proceedings of the 28th Workshop on the Semantics and Pragmatics of Dialogue},
  issn         = {2308-2275},
  location     = {Trento, Italy},
  pages        = {6--14},
  title        = {Turn-Taking Dynamics Across Different Phases of Explanatory Dialogues},
  year         = {2024},
}

