@misc{63446,
  booktitle    = {{Navigationen. Zeitschrift für Medien- und Kulturwissenschaften}},
  editor       = {{Dörre, Robert and Laut-Berger, Christina and Pilipets, Elena and Schulz, Christian}},
  publisher    = {{Universi Verlag}},
  title        = {{{Was waren soziale Medien? Begriffe im Wandel}}},
  volume       = {{1}},
  year         = {{2027}},
}

@article{63611,
  abstract     = {{When humans interact with artificial intelligence (AI), one desideratum is appropriate trust. Typically, appropriate trust encompasses that humans trust AI except for instances in which they either explicitly notice AI errors or are suspicious that errors could be present. So far, appropriate trust or related notions have mainly been investigated by assessing trust and reliance. In this contribution, we argue that these assessments are insufficient to measure the complex aim of appropriate trust and the related notion of healthy distrust. We introduce and test the perspective of covert visual attention as an additional indicator for appropriate trust and draw conceptual connections to the notion of healthy distrust. To test the validity of our conceptualization, we formalize visual attention using the Theory of Visual Attention and measure its properties that are potentially relevant to appropriate trust and healthy distrust in an image classification task. Based on temporal-order judgment performance, we estimate participants' attentional capacity and attentional weight toward correct and incorrect mock-up AI classifications. We observe that misclassifications reduce attentional capacity compared to correct classifications. However, our results do not indicate that this reduction is beneficial for a subsequent judgment of the classifications. The attentional weighting is not affected by the classifications' correctness but by the difficulty of categorizing the stimuli themselves. We discuss these results, their implications, and the limited potential for using visual attention as an indicator of appropriate trust and healthy distrust.}},
  author       = {{Peters, Tobias Martin and Biermeier, Kai and Scharlau, Ingrid}},
  issn         = {{1664-1078}},
  journal      = {{Frontiers in Psychology}},
  keywords     = {{appropriate trust, healthy distrust, visual attention, Theory of Visual Attention, human-AI interaction, Bayesian cognitive model, image classification}},
  publisher    = {{Frontiers Media SA}},
  title        = {{{Assessing healthy distrust in human-AI interaction: interpreting changes in visual attention}}},
  doi          = {{10.3389/fpsyg.2025.1694367}},
  volume       = {{16}},
  year         = {{2026}},
}

@inproceedings{64129,
  abstract     = {{Selecting scan angles such that surface segments are aligned with straight X-ray paths (i.e., rays are tangential to the surface and therefore perpendicular to the local surface normal) is known to produce sharper transitions of those surface segments in the reconstructed volume. This enhances dimensional accuracy in sparse-view computed tomography (CT). However, existing approaches offer no direct means to exploit this criterion for automatic scan-angle optimization. We propose a method that uses a virtual representation of the CT setup, including an STL surface model of the inspected part, to automatically identify task-specific scan angles. Using elementary vector calculus, the algorithm determines projection directions that generate tangential X-rays for targeted surface segments. To support different levels of geometric complexity, we introduce two variants of the angle-selection procedure. The methods were experimentally validated on two objects with distinct absorption and geometric characteristics. For a steel gauge block, employing the minimum number of task-specific projections required for surface-data completeness substantially outperformed a conventional high-projection scan. For a geometrically more complex test object, surface-related errors were still reduced within the region of interest. The proposed approach – particularly suited for flat surface structures and not accounting for image-degrading factors other than cone-beam artifacts – shows promise for high-throughput dimensional metrology of mono-material parts.}},
  author       = {{Butzhammer, Lorenz and Braun, Matthias Robert Oskar and Herath, Colin and Hausotte, Tino}},
  booktitle    = {{e-Journal of Nondestructive Testing}},
  issn         = {{1435-4934}},
  location     = {{Linz}},
  number       = {{3}},
  publisher    = {{NDT.net GmbH \& Co. KG}},
  title        = {{{Higher accuracy with fewer projections? Automated scan angle selection for dimensional Computed Tomography based on a simple data completeness measure for the part surface}}},
  doi          = {{10.58286/32560}},
  volume       = {{31}},
  year         = {{2026}},
}

@article{64877,
  author       = {{Taheri, Behnood and Kopylov, Denis and Hammer, Manfred and Meier, Torsten and Förstner, Jens and Sharapova, Polina R.}},
  eprint       = {{2603.01656}},
  eprinttype   = {{arXiv}},
  journal      = {{arXiv}},
  title        = {{{Gain-induced spectral non-degeneracy in type-II parametric down-conversion}}},
  doi          = {{10.48550/arXiv.2603.01656}},
  year         = {{2026}},
}

@inbook{61323,
  author       = {{Wrede, Britta and Buschmeier, Hendrik and Rohlfing, Katharina Justine and Booshehri, Meisam and Grimminger, Angela}},
  booktitle    = {{Social Explainable AI}},
  editor       = {{Rohlfing, Katharina J. and Främling, Kary and Alpsancar, Suzana and Thommes, Kirsten and Lim, Brian Y.}},
  pages        = {{227--245}},
  publisher    = {{Springer}},
  title        = {{{Incremental communication}}},
  doi          = {{10.1007/978-981-96-5290-7_12}},
  year         = {{2026}},
}

@inbook{61321,
  author       = {{Grimminger, Angela and Buschmeier, Hendrik}},
  booktitle    = {{Social Explainable AI}},
  editor       = {{Rohlfing, Katharina J. and Främling, Kary and Alpsancar, Suzana and Thommes, Kirsten and Lim, Brian Y.}},
  pages        = {{351--365}},
  publisher    = {{Springer}},
  title        = {{{Theoretical aspects of multimodal processing}}},
  doi          = {{10.1007/978-981-96-5290-7_18}},
  year         = {{2026}},
}

@inbook{61322,
  author       = {{Lazarov, Stefan Teodorov and Tchappi, Igor and Grimminger, Angela}},
  booktitle    = {{Social Explainable AI}},
  editor       = {{Rohlfing, Katharina J. and Främling, Kary and Alpsancar, Suzana and Thommes, Kirsten and Lim, Brian Y.}},
  pages        = {{367--390}},
  publisher    = {{Springer}},
  title        = {{{Characteristics of nonverbal behavior}}},
  doi          = {{10.1007/978-981-96-5290-7_19}},
  year         = {{2026}},
}

@inbook{61324,
  author       = {{Wagner, Petra and Kopp, Stefan}},
  booktitle    = {{Social Explainable AI}},
  editor       = {{Rohlfing, Katharina J. and Främling, Kary and Alpsancar, Suzana and Thommes, Kirsten and Lim, Brian Y.}},
  pages        = {{433--446}},
  publisher    = {{Springer}},
  title        = {{{Timing and synchronization of multimodal signals in explanations}}},
  doi          = {{10.1007/978-981-96-5290-7_22}},
  year         = {{2026}},
}

@inbook{61112,
  author       = {{Rohlfing, Katharina J. and Vollmer, Anna-Lisa and Grimminger, Angela}},
  booktitle    = {{Social Explainable AI}},
  editor       = {{Rohlfing, Katharina J. and Främling, Kary and Thommes, Kirsten and Alpsancar, Suzana and Lim, Brian Y.}},
  publisher    = {{Springer}},
  title        = {{{Practices: How to establish an explaining practice}}},
  doi          = {{10.1007/978-981-96-5290-7_5}},
  year         = {{2026}},
}

@inbook{61325,
  author       = {{Vollmer, Anna-Lisa and Buhl, Heike M. and Alami, Rachid and Främling, Kary and Grimminger, Angela and Booshehri, Meisam and Ngonga Ngomo, Axel-Cyrille}},
  booktitle    = {{Social Explainable AI}},
  editor       = {{Rohlfing, Katharina J. and Främling, Kary and Lim, Brian Y. and Alpsancar, Suzana and Thommes, Kirsten}},
  pages        = {{39--53}},
  publisher    = {{Springer}},
  title        = {{{Components of an explanation for co-constructive sXAI}}},
  doi          = {{10.1007/978-981-96-5290-7_3}},
  year         = {{2026}},
}

@inbook{65084,
  author       = {{Buhl, Heike M. and Vollmer, Anna-Lisa and Alami, Rachid and Booshehri, Meisam and Främling, Kary}},
  booktitle    = {{Social Explainable AI}},
  editor       = {{Rohlfing, Katharina J. and Främling, Kary and Lim, Brian Y. and Alpsancar, Suzana and Thommes, Kirsten}},
  pages        = {{269--295}},
  publisher    = {{Springer}},
  title        = {{{Models of the situation, the explanandum, and the interaction partner}}},
  doi          = {{10.1007/978-981-96-5290-7_14}},
  year         = {{2026}},
}

@inbook{65083,
  author       = {{Buhl, Heike M. and Wrede, Britta and Fisher, Josephine Beryl and Matarese, Marco}},
  booktitle    = {{Social Explainable AI}},
  editor       = {{Rohlfing, Katharina J. and Främling, Kary and Lim, Brian Y. and Alpsancar, Suzana and Thommes, Kirsten}},
  pages        = {{247--267}},
  publisher    = {{Springer}},
  title        = {{{Adaptation}}},
  doi          = {{10.1007/978-981-96-5290-7_13}},
  year         = {{2026}},
}

@inproceedings{64914,
  abstract     = {{We investigate how verbal and nonverbal linguistic features, exhibited by speakers and listeners in dialogue, can contribute to predicting the listener's state of understanding in explanatory interactions on a moment-by-moment basis. Specifically, we examine three linguistic cues related to cognitive load and hypothesised to correlate with listener understanding: the information value (operationalised with surprisal) and syntactic complexity of the speaker's utterances, and the variation in the listener's interactive gaze behaviour. Based on statistical analyses of the MUNDEX corpus of face-to-face dialogic board game explanations, we find that individual cues vary with the listener's level of understanding. Listener states (‘Understanding’, ‘Partial Understanding’, ‘Non-Understanding’ and ‘Misunderstanding’) were self-annotated by the listeners using a retrospective video-recall method. The results of a subsequent classification experiment, involving two off-the-shelf classifiers and a fine-tuned German BERT-based multimodal classifier, demonstrate that prediction of these four states of understanding is generally possible and improves when the three linguistic cues are considered alongside textual features.}},
  author       = {{Wang, Yu and Türk, Olcay and Grimminger, Angela and Buschmeier, Hendrik}},
  booktitle    = {{Proceedings of the 15th Biennial Language Resources and Evaluation Conference}},
  location     = {{Palma, Mallorca, Spain}},
  publisher    = {{ELRA}},
  title        = {{{Predicting states of understanding in explanatory interactions using cognitive load-related linguistic cues}}},
  doi          = {{10.48550/arXiv.2603.20079}},
  year         = {{2026}},
}

@article{65153,
  author       = {{Butzhammer, Lorenz}},
  issn         = {{0141-6359}},
  journal      = {{Precision Engineering}},
  pages        = {{377--400}},
  publisher    = {{Elsevier BV}},
  title        = {{{Conversion between detector- and rotary-table-related misalignment parameterisations for unified projection-matrix-based geometry calibration in dimensional X-ray computed tomography}}},
  doi          = {{10.1016/j.precisioneng.2026.03.015}},
  volume       = {{100}},
  year         = {{2026}},
}

@unpublished{61151,
  abstract     = {{In this paper, we discuss the application of retrospective video recall for the assessment of cognitive processes in explanatory interactions, such as understanding and mental models. Our purpose is to reflect on the benefits and limitations of video recall compared to another self-report method, ‘thinking-aloud’. To do so, we reveal empirical results from the application of video recall in three interdisciplinary research projects that applied the method for the qualitative and quantitative assessment of cognitive and behavioral phenomena in everyday explanations. In all three projects, video recall was applied as a post-hoc procedure following the recording of dyadic face-to-face explanations of board games. The design of the video recall procedure differed between individual projects because they pursued different research objectives – that is the investigation of (1) an interlocutor's multimodal signals of understanding, (2) the change in assumptions about an interlocutor's dispositional and situational knowledge, and (3) the differentiated assessment of an interlocutor's developing understanding of domain knowledge aspects by distinguishing between mechanistic and functional explanatory stances. By discussing the benefits and the limitations of each procedure, this article provides critical reflections on video recall as a versatile research method applied for the analysis of human multimodal behavior in interaction and cognitive processing.}},
  author       = {{Lazarov, Stefan Teodorov and Schaffer, Michael and Gladow, Viviane and Buschmeier, Hendrik and Buhl, Heike M. and Grimminger, Angela}},
  note         = {{Unpublished manuscript}},
  pages        = {{29}},
  title        = {{{Retrospective video recall for analyzing cognitive processes in naturalistic explanations}}},
  year         = {{2026}},
}

@inproceedings{65261,
  author       = {{Trentinaglia, Roman and Koch, Thorsten and Bodden, Eric}},
  booktitle    = {{Proceedings of the 14th International Conference on Model-Based Software and Systems Engineering}},
  publisher    = {{SCITEPRESS - Science and Technology Publications}},
  title        = {{{Using Attack and Failure Propagation Analysis for Context-Aware Security Control Suggestions}}},
  doi          = {{10.5220/0014278000004058}},
  year         = {{2026}},
}

@inproceedings{61444,
  abstract     = {{Backchannels and fillers are important linguistic expressions in dialogue, but often treated as ‘noise’ to be bypassed in modern transformer-based language models. Our work studies the representation of them in language models using three fine-tuning strategies. The models are trained on three dialogue corpora in English and Japanese, where backchannels and fillers are preserved and annotated, to investigate how fine-tuning can help LMs learn their representations. We first apply clustering analysis to the learnt representation of backchannels and fillers, and have found increased silhouette scores in representations from fine-tuned models, which suggests that fine-tuning enables LMs to distinguish the nuanced semantic variation in different backchannel and filler use. We also use natural language generation (NLG) metrics and qualitative analysis to confirm that the utterances generated by fine-tuned language models resemble human-produced utterances more closely. Our findings suggest the potentials of transforming general LMs into conversational LMs that are more capable of producing human-like languages adequately.}},
  author       = {{Wang, Yu and Lao, Leyi and Huang, Langchu and Skantze, Gabriel and Xu, Yang and Buschmeier, Hendrik}},
  location     = {{San Diego, CA, USA}},
  title        = {{{Investigating the representation of backchannels and fillers in fine-tuned language models}}},
  year         = {{2026}},
}

@inproceedings{65363,
  abstract     = {{Recent theoretical advancement of information density in natural language has brought the following question on desk: To what degree does natural language exhibit periodicity pattern in its encoded information? We address this question by introducing a new method called AutoPeriod of Surprisal (APS). APS adopts a canonical periodicity detection algorithm and is able to identify any significant periods that exist in the surprisal sequence of a single document. By applying the algorithm to a set of corpora, we have obtained the following interesting results: Firstly, a considerable proportion of human language demonstrates a strong pattern of periodicity in information; Secondly, new periods that are outside the distributions of typical structural units in text (e.g., sentence boundaries, elementary discourse units, etc.) are found and further confirmed via harmonic regression modeling. We conclude that the periodicity of information in language is a joint outcome from both structured factors and other driving factors that take effect at longer distances. The advantages of our periodicity detection method and its potentials in LLM-generation detection are further discussed.}},
  author       = {{Ou, Yulin and Wang, Yu and Xu, Yang and Buschmeier, Hendrik}},
  location     = {{San Diego, CA, USA}},
  title        = {{{Identifying the periodicity of information in natural language}}},
  year         = {{2026}},
}

@article{65476,
  abstract     = {{Service research has evolved into an interdisciplinary research field that bridges diverse disciplines, including information systems (IS) and marketing. Nearly two decades ago, the service system concept was introduced as a foundational abstraction in service research, drawing on ideas from the service-dominant logic (S-D logic) of marketing. Despite its widespread adoption in service research, particularly in the IS discipline, the service system concept lacks a solid theoretical foundation. This has resulted in conceptual ambiguity and overlap with related constructs, such as service ecosystems. Moreover, it has largely remained a static analytical lens, insufficiently capturing dynamic service phenomena, including value co-creation and co-destruction, as well as the emergence of institutional arrangements. To address these limitations, we propose Luhmann’s systems theory (LST) as a robust framework for conceptualizing service systems as autopoietic (self-creating) systems, in which communication serves as the fundamental mechanism that drives value co-creation. We derive five theoretical propositions from this re-conceptualization that clarify conceptual ambiguity and allow researchers to explore dynamic service phenomena in greater depth. Given LST’s general approach, our conceptualization provides a theoretically grounded, interdisciplinary foundation for advancing service research.}},
  author       = {{Beverungen, Daniel and Poeppelbuss, Jens and Hemmrich, Simon and Iqbal, Taskeen}},
  issn         = {{1019-6781}},
  journal      = {{Electronic Markets}},
  keywords     = {{Service system, Service ecosystem, Systems theory, Service research}},
  number       = {{1}},
  publisher    = {{Springer Science and Business Media LLC}},
  title        = {{{Service through communication—Conceptualizing service systems with Luhmann’s systems theory}}},
  doi          = {{10.1007/s12525-026-00889-w}},
  volume       = {{36}},
  year         = {{2026}},
}

@article{65565,
  abstract     = {{Gaze behavior, being continuously accessible to interlocutors in face-to-face interactions, serves as a cue for managing turn-taking, regulating the duration of topical sequences, and supporting cognitive processing in various everyday conversational contexts. The present study seeks to enhance the understanding of the relation between two forms of interactive gaze behavior – gaze aversions and mutual gaze – and the topical development in the explanatory discourse. To do so, we analyzed 24 dyadic board game explanations in which one explainer subsequently explained a board game to three different explainees while the board game was physically absent from the shared space. The main objective of the present study was to investigate the relation of gaze aversions and mutual gaze to the topical development of explanations. For this, based on previous research (Lazarov et al., 2024; Rossano, 2012) we hypothesized that (1) gaze aversions are more likely to be associated with topic changes than topic continuations, and that (2) mutual gaze is more likely to be associated with topic continuations than topic changes. In addition, we explored how the two forms of gaze behavior are related to the interlocutor who initiates a topic change or continuation. Our proportional analysis using a Generalized linear mixed effects model revealed that gaze aversions are related to topic changes initiated by both interlocutors. In contrast, the analysis did not reveal a significant relation between mutual gaze and topic continuations, which could be explained by the feedback elicitation function of mutual gaze at the end of speakers’ utterances (Bavelas et al., 2002; Brône et al., 2017; Kendon, 1967) while monitoring the addressees’ understanding (Clark \& Krych, 2004) and the complexity of the analyzed fixed and random effects.}},
  author       = {{Lazarov, Stefan Teodorov and Grimminger, Angela}},
  issn         = {{0191-5886}},
  journal      = {{Journal of Nonverbal Behavior}},
  publisher    = {{Springer Science and Business Media LLC}},
  title        = {{{How are Gaze Aversions and Mutual Gaze Related to the Topical Development of Dyadic Explanatory Interactions?}}},
  doi          = {{10.1007/s10919-026-00512-8}},
  year         = {{2026}},
}

