@misc{63446,
  booktitle    = {{Navigationen. Zeitschrift für Medien- und Kulturwissenschaften}},
  editor       = {{Dörre, Robert and Laut-Berger, Christina and Pilipets, Elena and Schulz, Christian}},
  publisher    = {{Universi Verlag}},
  title        = {{{Was waren soziale Medien? Begriffe im Wandel}}},
  volume       = {{1}},
  year         = {{2027}},
  internal-note = {{NOTE(review): edited special issue of a journal; standard @misc styles ignore booktitle/volume, so consider @collection or @periodical (biblatex) -- confirm against house style. Year 2027 looks like a forthcoming date -- verify.}},
}

@article{63611,
  author       = {{Peters, Tobias Martin and Biermeier, Kai and Scharlau, Ingrid}},
  title        = {{{Assessing healthy distrust in human-AI interaction: interpreting changes in visual attention}}},
  journal      = {{Frontiers in Psychology}},
  abstract     = {{When humans interact with artificial intelligence (AI), one desideratum is appropriate trust. Typically, appropriate trust encompasses that humans trust AI except for instances in which they either explicitly notice AI errors or are suspicious that errors could be present. So far, appropriate trust or related notions have mainly been investigated by assessing trust and reliance. In this contribution, we argue that these assessments are insufficient to measure the complex aim of appropriate trust and the related notion of healthy distrust. We introduce and test the perspective of covert visual attention as an additional indicator for appropriate trust and draw conceptual connections to the notion of healthy distrust. To test the validity of our conceptualization, we formalize visual attention using the Theory of Visual Attention and measure its properties that are potentially relevant to appropriate trust and healthy distrust in an image classification task. Based on temporal-order judgment performance, we estimate participants' attentional capacity and attentional weight toward correct and incorrect mock-up AI classifications. We observe that misclassifications reduce attentional capacity compared to correct classifications. However, our results do not indicate that this reduction is beneficial for a subsequent judgment of the classifications. The attentional weighting is not affected by the classifications' correctness but by the difficulty of categorizing the stimuli themselves. We discuss these results, their implications, and the limited potential for using visual attention as an indicator of appropriate trust and healthy distrust.}},
  keywords     = {{appropriate trust, healthy distrust, visual attention, Theory of Visual Attention, human-AI interaction, Bayesian cognitive model, image classification}},
  issn         = {{1664-1078}},
  publisher    = {{Frontiers Media SA}},
  doi          = {{10.3389/fpsyg.2025.1694367}},
  volume       = {{16}},
  year         = {{2026}},
}

@inbook{61323,
  author       = {{Wrede, Britta and Buschmeier, Hendrik and Rohlfing, Katharina Justine and Booshehri, Meisam and Grimminger, Angela}},
  title        = {{{Incremental communication}}},
  booktitle    = {{Social Explainable AI}},
  editor       = {{Rohlfing, Katharina J. and Främling, Kary and Alpsancar, Suzana and Thommes, Kirsten and Lim, Brian Y.}},
  pages        = {{227--245}},
  publisher    = {{Springer}},
  doi          = {{10.1007/978-981-96-5290-7_12}},
  year         = {{2026}},
}

@inbook{61321,
  author       = {{Grimminger, Angela and Buschmeier, Hendrik}},
  title        = {{{Theoretical aspects of multimodal processing}}},
  booktitle    = {{Social Explainable AI}},
  editor       = {{Rohlfing, Katharina J. and Främling, Kary and Alpsancar, Suzana and Thommes, Kirsten and Lim, Brian Y.}},
  pages        = {{351--365}},
  publisher    = {{Springer}},
  doi          = {{10.1007/978-981-96-5290-7_18}},
  year         = {{2026}},
}

@inbook{61322,
  author       = {{Lazarov, Stefan Teodorov and Tchappi, Igor and Grimminger, Angela}},
  title        = {{{Characteristics of nonverbal behavior}}},
  booktitle    = {{Social Explainable AI}},
  editor       = {{Rohlfing, Katharina J. and Främling, Kary and Alpsancar, Suzana and Thommes, Kirsten and Lim, Brian Y.}},
  pages        = {{367--390}},
  publisher    = {{Springer}},
  doi          = {{10.1007/978-981-96-5290-7_19}},
  year         = {{2026}},
}

@inbook{61324,
  author       = {{Wagner, Petra and Kopp, Stefan}},
  title        = {{{Timing and synchronization of multimodal signals in explanations}}},
  booktitle    = {{Social Explainable AI}},
  editor       = {{Rohlfing, Katharina J. and Främling, Kary and Alpsancar, Suzana and Thommes, Kirsten and Lim, Brian Y.}},
  pages        = {{433--446}},
  publisher    = {{Springer}},
  doi          = {{10.1007/978-981-96-5290-7_22}},
  year         = {{2026}},
}

@inbook{61112,
  author       = {{Rohlfing, Katharina J. and Vollmer, Anna-Lisa and Grimminger, Angela}},
  booktitle    = {{Social Explainable AI}},
  editor       = {{Rohlfing, Katharina J. and Främling, Kary and Alpsancar, Suzana and Thommes, Kirsten and Lim, Brian Y.}},
  publisher    = {{Springer}},
  title        = {{{Practices: How to establish an explaining practice}}},
  doi          = {{10.1007/978-981-96-5290-7_5}},
  year         = {{2026}},
  internal-note = {{NOTE(review): editor list normalized to the form used by the other chapters of this volume (was missing "J." and ordered differently). Page range missing -- the sibling chapter entries all carry one; look up pages for chapter 5.}},
}

@inbook{61325,
  author       = {{Vollmer, Anna-Lisa and Buhl, Heike M. and Alami, Rachid and Främling, Kary and Grimminger, Angela and Booshehri, Meisam and Ngonga Ngomo, Axel-Cyrille}},
  booktitle    = {{Social Explainable AI}},
  editor       = {{Rohlfing, Katharina J. and Främling, Kary and Alpsancar, Suzana and Thommes, Kirsten and Lim, Brian Y.}},
  pages        = {{39--53}},
  publisher    = {{Springer}},
  title        = {{{Components of an explanation for co-constructive sXAI}}},
  doi          = {{10.1007/978-981-96-5290-7_3}},
  year         = {{2026}},
  internal-note = {{NOTE(review): editor list normalized to the form used by the other chapters of this volume (was "Lim, Brian" without "Y." and ordered differently) -- verify against the published front matter.}},
}

@inbook{65084,
  author       = {{Buhl, Heike M. and Vollmer, Anna-Lisa and Alami, Rachid and Booshehri, Meisam and Främling, Kary}},
  booktitle    = {{Social Explainable AI}},
  editor       = {{Rohlfing, Katharina J. and Främling, Kary and Alpsancar, Suzana and Thommes, Kirsten and Lim, Brian Y.}},
  pages        = {{269--295}},
  publisher    = {{Springer}},
  title        = {{{Models of the situation, the explanandum, and the interaction partner}}},
  doi          = {{10.1007/978-981-96-5290-7_14}},
  year         = {{2026}},
  internal-note = {{NOTE(review): fixed editor typo "Kisten" -> "Kirsten", booktitle casing, editor order as in sibling chapters, and stripped the https://doi.org/ resolver prefix from the DOI.}},
}

@inbook{65083,
  author       = {{Buhl, Heike M. and Wrede, Britta and Fisher, Josephine Beryl and Matarese, Marco}},
  booktitle    = {{Social Explainable AI}},
  editor       = {{Rohlfing, Katharina J. and Främling, Kary and Alpsancar, Suzana and Thommes, Kirsten and Lim, Brian Y.}},
  pages        = {{247--267}},
  publisher    = {{Springer}},
  title        = {{{Adaptation}}},
  doi          = {{10.1007/978-981-96-5290-7_13}},
  year         = {{2026}},
  internal-note = {{NOTE(review): stripped the https://doi.org/ resolver prefix from the DOI (store bare DOIs); editor order normalized to the sibling chapters -- verify against the published front matter.}},
}

@inproceedings{64914,
  author       = {{Wang, Yu and Türk, Olcay and Grimminger, Angela and Buschmeier, Hendrik}},
  title        = {{{Predicting states of understanding in explanatory interactions using cognitive load-related linguistic cues}}},
  booktitle    = {{Proceedings of the 15th Biennial Language Resources and Evaluation Conference}},
  abstract     = {{We investigate how verbal and nonverbal linguistic features, exhibited by speakers and listeners in dialogue, can contribute to predicting the listener's state of understanding in explanatory interactions on a moment-by-moment basis. Specifically, we examine three linguistic cues related to cognitive load and hypothesised to correlate with listener understanding: the information value (operationalised with surprisal) and syntactic complexity of the speaker's utterances, and the variation in the listener's interactive gaze behaviour. Based on statistical analyses of the MUNDEX corpus of face-to-face dialogic board game explanations, we find that individual cues vary with the listener's level of understanding. Listener states (‘Understanding’, ‘Partial Understanding’, ‘Non-Understanding’ and ‘Misunderstanding’) were self-annotated by the listeners using a retrospective video-recall method. The results of a subsequent classification experiment, involving two off-the-shelf classifiers and a fine-tuned German BERT-based multimodal classifier, demonstrate that prediction of these four states of understanding is generally possible and improves when the three linguistic cues are considered alongside textual features.}},
  location     = {{Palma, Mallorca, Spain}},
  publisher    = {{ELRA}},
  doi          = {{10.48550/arXiv.2603.20079}},
  year         = {{2026}},
}

@unpublished{61151,
  abstract     = {{In this paper, we discuss the application of retrospective video recall for the assessment of cognitive processes in explanatory interactions, such as understanding and mental models. Our purpose is to reflect on the benefits and limitations of video recall compared to another self-report method, ‘thinking-aloud’. To do so, we reveal empirical results from the application of video recall in three interdisciplinary research projects that applied the method for the qualitative and quantitative assessment of cognitive and behavioral phenomena in everyday explanations. In all three projects, video recall was applied as a post-hoc procedure following the recording of dyadic face-to-face explanations of board games. The design of the video recall procedure differed between individual projects because they pursued different research objectives – that is the investigation of (1) an interlocutor's multimodal signals of understanding, (2) the change in assumptions about an interlocutor's dispositional and situational knowledge, and (3) the differentiated assessment of an interlocutor's developing understanding of domain knowledge aspects by distinguishing between mechanistic and functional explanatory stances. By discussing the benefits and the limitations of each procedure, this article provides critical reflections on video recall as a versatile research method applied for the analysis of human multimodal behavior in interaction and cognitive processing.}},
  author       = {{Lazarov, Stefan Teodorov and Schaffer, Michael and Gladow, Viviane and Buschmeier, Hendrik and Buhl, Heike M. and Grimminger, Angela}},
  note         = {{Unpublished manuscript}},
  pages        = {{29}},
  title        = {{{Retrospective video recall for analyzing cognitive processes in naturalistic explanations}}},
  year         = {{2026}},
}

@inproceedings{61444,
  abstract     = {{Backchannels and fillers are important linguistic expressions in dialogue, but often treated as ‘noise’ to be bypassed in modern transformer-based language models. Our work studies the representation of them in language models using three fine-tuning strategies. The models are trained on three dialogue corpora in English and Japanese, where backchannels and fillers are preserved and annotated, to investigate how fine-tuning can help LMs learn their representations. We first apply clustering analysis to the learnt representation of backchannels and fillers, and have found increased silhouette scores in representations from fine-tuned models, which suggests that fine-tuning enables LMs to distinguish the nuanced semantic variation in different backchannel and filler use. We also use natural language generation (NLG) metrics and qualitative analysis to confirm that the utterances generated by fine-tuned language models resemble human-produced utterances more closely. Our findings suggest the potentials of transforming general LMs into conversational LMs that are more capable of producing human-like languages adequately.}},
  author       = {{Wang, Yu and Lao, Leyi and Huang, Langchu and Skantze, Gabriel and Xu, Yang and Buschmeier, Hendrik}},
  location     = {{San Diego, CA, USA}},
  title        = {{{Investigating the representation of backchannels and fillers in fine-tuned language models}}},
  year         = {{2026}},
  internal-note = {{NOTE(review): booktitle (proceedings title) and publisher are missing -- booktitle is required for @inproceedings; venue not determinable from this entry, please complete.}},
}

@inproceedings{65363,
  abstract     = {{Recent theoretical advancement of information density in natural language has brought the following question on desk: To what degree does natural language exhibit periodicity pattern in its encoded information? We address this question by introducing a new method called AutoPeriod of Surprisal (APS). APS adopts a canonical periodicity detection algorithm and is able to identify any significant periods that exist in the surprisal sequence of a single document. By applying the algorithm to a set of corpora, we have obtained the following interesting results: Firstly, a considerable proportion of human language demonstrates a strong pattern of periodicity in information; Secondly, new periods that are outside the distributions of typical structural units in text (e.g., sentence boundaries, elementary discourse units, etc.) are found and further confirmed via harmonic regression modeling. We conclude that the periodicity of information in language is a joint outcome from both structured factors and other driving factors that take effect at longer distances. The advantages of our periodicity detection method and its potentials in LLM-generation detection are further discussed.}},
  author       = {{Ou, Yulin and Wang, Yu and Xu, Yang and Buschmeier, Hendrik}},
  location     = {{San Diego, CA, USA}},
  title        = {{{Identifying the periodicity of information in natural language}}},
  year         = {{2026}},
  internal-note = {{NOTE(review): booktitle (proceedings title) and publisher are missing -- booktitle is required for @inproceedings; venue not determinable from this entry, please complete.}},
}

@article{59756,
  author       = {{Visser, Roel and Peters, Tobias Martin and Scharlau, Ingrid and Hammer, Barbara}},
  title        = {{{Trust, distrust, and appropriate reliance in (X)AI: A conceptual clarification of user trust and survey of its empirical evaluation}}},
  journal      = {{Cognitive Systems Research}},
  abstract     = {{A current concern in the field of Artificial Intelligence (AI) is to ensure the trustworthiness of AI systems. The development of explainability methods is one prominent way to address this, which has often resulted in the assumption that the use of explainability will lead to an increase in the trust of users and wider society. However, the dynamics between explainability and trust are not well established and empirical investigations of their relation remain mixed or inconclusive.
In this paper we provide a detailed description of the concepts of user trust and distrust in AI and their relation to appropriate reliance. For that we draw from the fields of machine learning, human–computer interaction, and the social sciences. Based on these insights, we have created a focused study of empirical literature of existing empirical studies that investigate the effects of AI systems and XAI methods on user (dis)trust, in order to substantiate our conceptualization of trust, distrust, and reliance. With respect to our conceptual understanding we identify gaps in existing empirical work. With clarifying the concepts and summarizing the empirical studies, we aim to provide researchers, who examine user trust in AI, with an improved starting point for developing user studies to measure and evaluate the user’s attitude towards and reliance on AI systems.}},
  keywords     = {{XAI, Appropriate trust, Distrust, Reliance, Human-centric evaluation, Trustworthy AI}},
  issn         = {{1389-0417}},
  publisher    = {{Elsevier BV}},
  doi          = {{10.1016/j.cogsys.2025.101357}},
  year         = {{2025}},
}

@inproceedings{59999,
  author       = {{Rautenberg, Frederik and Kuhlmann, Michael and Seebauer, Fritz and Wiechmann, Jana and Wagner, Petra and Haeb-Umbach, Reinhold}},
  booktitle    = {{ICASSP 2025 - 2025 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)}},
  location     = {{Hyderabad, India}},
  publisher    = {{IEEE}},
  title        = {{{Speech Synthesis along Perceptual Voice Quality Dimensions}}},
  doi          = {{10.1109/icassp49660.2025.10888012}},
  year         = {{2025}},
}

@article{59755,
  abstract     = {{Due to the application of Artificial Intelligence (AI) in high-risk domains like law or medicine, trustworthy AI and trust in AI are of increasing scientific and public relevance. A typical conception, for example in the context of medical diagnosis, is that a knowledgeable user receives AI-generated classification as advice. Research to improve such interactions often aims to foster the user’s trust, which in turn should improve the combined human-AI performance. Given that AI models can err, we argue that the possibility to critically review, thus to distrust, an AI decision is an equally interesting target of research. We created two image classification scenarios in which the participants received mock-up AI advice. The quality of the advice decreases for a phase of the experiment. We studied the task performance, trust and distrust of the participants, and tested whether an instruction to remain skeptical and review each piece of advice led to a better performance compared to a neutral condition. Our results indicate that this instruction does not improve but rather worsens the participants’ performance. Repeated single-item self-report of trust and distrust shows an increase in trust and a decrease in distrust after the drop in the AI’s classification quality, with no difference between the two instructions. Furthermore, via a Bayesian Signal Detection Theory analysis, we provide a procedure to assess appropriate reliance in detail, by quantifying whether the problems of under- and over-reliance have been mitigated. We discuss implications of our results for the usage of disclaimers before interacting with AI, as prominently used in current LLM-based chatbots, and for trust and distrust research.}},
  author       = {{Peters, Tobias Martin and Scharlau, Ingrid}},
  issn         = {{1664-1078}},
  journal      = {{Frontiers in Psychology}},
  keywords     = {{trust in AI, trust, distrust, human-AI interaction, Signal Detection Theory, Bayesian parameter estimation, image classification}},
  publisher    = {{Frontiers Media SA}},
  title        = {{{Interacting with fallible AI: Is distrust helpful when receiving AI misclassifications?}}},
  doi          = {{10.3389/fpsyg.2025.1574809}},
  volume       = {{16}},
  year         = {{2025}},
  internal-note = {{NOTE(review): abstract reflowed (was hard-wrapped from a PDF paste, which broke "AI-generated" into "AIgenerated"); issn/publisher added to match the other Frontiers in Psychology entry in this file.}},
}

@article{60935,
  abstract     = {{Research suggests that presenting an action via multimodal stimulation (verbal and visual) enhances its perception. To highlight this, in most studies, assertive instructions are generally presented before the occurrence of the visual subevent(s). However, verbal instructions need not always be assertive; they can also include negation to contrast the present event with a prior one, thereby facilitating processing—a phenomenon known as contextual facilitation. In our study, we investigated whether using negation to guide an action sequence facilitates action perception, particularly when two consecutive subactions contrast with each other. Stimuli from previous studies on action demonstration were used to create (non)contrastive actions, that is, a ball following noncontrastive and identical (Over–Over or Under–Under) versus contrastive and opposite paths (Over–Under or Under–Over) before terminating at a goal location. In Experiment 1, either an assertive or a negative instruction was provided as verbal guidance before onset of each path. Analyzing data from 35 participants, we found that, whereas assertive instructions facilitate overall action recall, negating the later path for contrastive actions is equally facilitative. Given that action goal is the most salient aspect in event memory due to goal-path bias in attention, a second experiment was conducted to test the effect of multimodal synchrony on goal attention and action memory. Experiment 2 revealed that when instructions overlap with actions, they become more tailored—assertive instructions effectively guide noncontrastive actions, while assertive–negative instruction particularly guides contrastive actions. Both studies suggest that increased attention to the goal leads to coarser perception of midevents, with action-instruction synchrony modulating goal bias in real-time event apprehension to serve distinct purposes for action conceptualization. Whereas presenting instructions before subactions attenuates goal attention, overlapping instructions increase goal attention and reveal the selective roles of assertive and negative instructions in guiding contrastive and noncontrastive actions.}},
  author       = {{Singh, Amit and Rohlfing, Katharina J.}},
  journal      = {{Cognitive Science}},
  number       = {{8}},
  publisher    = {{Wiley}},
  title        = {{{Contrastive Verbal Guidance: A Beneficial Context for Attention To Events and Their Memory?}}},
  doi          = {{10.1111/cogs.70096}},
  volume       = {{49}},
  year         = {{2025}},
}

@inproceedings{61047,
  author       = {{Rautenberg, Frederik and Seebauer, Fritz and Wiechmann, Jana and Kuhlmann, Michael and Wagner, Petra and Haeb-Umbach, Reinhold}},
  booktitle    = {{Interspeech 2025}},
  location     = {{Rotterdam, The Netherlands}},
  publisher    = {{ISCA}},
  title        = {{{Synthesizing Speech with Selected Perceptual Voice Qualities – A Case Study with Creaky Voice}}},
  doi          = {{10.21437/Interspeech.2025-1443}},
  year         = {{2025}},
}

@inproceedings{61154,
  author       = {{Türk, Olcay and Lazarov, Stefan Teodorov and Buschmeier, Hendrik and Wagner, Petra and Grimminger, Angela}},
  title        = {{{Acoustic detection of false positive backchannels of understanding in explanations}}},
  booktitle    = {{LingCologne 2025 – Book of Abstracts}},
  location     = {{Cologne, Germany}},
  pages        = {{36}},
  year         = {{2025}},
}

