@article{63611,
  author        = {Peters, Tobias Martin and Biermeier, Kai and Scharlau, Ingrid},
  title         = {Assessing Healthy Distrust in Human-{AI} Interaction: Interpreting Changes in Visual Attention},
  journal       = {Frontiers in Psychology},
  publisher     = {Frontiers Media SA},
  issn          = {1664-1078},
  volume        = {16},
  year          = {2025},
  doi           = {10.3389/fpsyg.2025.1694367},
  keywords      = {appropriate trust, healthy distrust, visual attention, Theory of Visual Attention, human-AI interaction, Bayesian cognitive model, image classification},
  abstract      = {When humans interact with artificial intelligence (AI), one desideratum is appropriate trust. Typically, appropriate trust encompasses that humans trust AI except for instances in which they either explicitly notice AI errors or are suspicious that errors could be present. So far, appropriate trust or related notions have mainly been investigated by assessing trust and reliance. In this contribution, we argue that these assessments are insufficient to measure the complex aim of appropriate trust and the related notion of healthy distrust. We introduce and test the perspective of covert visual attention as an additional indicator for appropriate trust and draw conceptual connections to the notion of healthy distrust. To test the validity of our conceptualization, we formalize visual attention using the Theory of Visual Attention and measure its properties that are potentially relevant to appropriate trust and healthy distrust in an image classification task. Based on temporal-order judgment performance, we estimate participants' attentional capacity and attentional weight toward correct and incorrect mock-up AI classifications. We observe that misclassifications reduce attentional capacity compared to correct classifications. However, our results do not indicate that this reduction is beneficial for a subsequent judgment of the classifications. The attentional weighting is not affected by the classifications' correctness but by the difficulty of categorizing the stimuli themselves. We discuss these results, their implications, and the limited potential for using visual attention as an indicator of appropriate trust and healthy distrust.},
  internal-note = {year corrected 2026 -> 2025 to match DOI segment (fpsyg.2025.*) and volume 16; assumes Frontiers volume/year alignment -- verify against publisher record},
}

@inproceedings{53818,
  author    = {Krings, Sarah Claudia and Biermeier, Kai and Yigitbas, Enes},
  title     = {Interaction Techniques for Remote Maintenance in an {AR} Shared Environment},
  booktitle = {Proceedings of the 10th International Working Conference on Human-Centered Software Engineering (HCSE'24)},
  year      = {2024},
}

@inproceedings{53816,
  author    = {Biermeier, Kai and Scharlau, Ingrid and Yigitbas, Enes},
  title     = {Measuring Visual Attention Capacity Across {xReality}},
  booktitle = {Proceedings of the 17th International Conference on {PErvasive} Technologies Related to Assistive Environments ({PETRA} 2024)},
  publisher = {ACM},
  year      = {2024},
  doi       = {10.1145/3652037.3652050},
  keywords  = {Visual Attention, TVA, Cognitive Modelling, Bayesian Modelling, AR, VR},
  abstract  = {Augmented (AR) and Virtual Reality (VR) technologies have been applied very broadly in the recent past. While prior work emphasizes the potential of these technologies in various application domains, the process of visual attention in and across the contexts of AR/VR environments is not exhaustively explored yet. By now, visual attention in AR/VR environments has majorly been studied by means of overt attention (i.e. saccadic eye movements), self-report, and process-related visual attention proxies (like reaction time). In this work, we analyze covert visual attention based on the (psychological) Theory of Visual Attention (TVA), which allows us to quantify theory-based interpretable properties of the visual attention process. For example, the TVA allows us to measure the overall processing speed. We instantiate this TVA-based framework with a 30-participant explorative within-subjects study. The results show a decisive difference in visual attention between Reality (i.e. the neutral condition) and Virtual Reality and a weak difference between Reality and Augmented Reality. We discuss the consequences of our findings and provide ideas for future studies.},
}

@article{59888,
  author    = {Lazarov, Stefan Teodorov and Biermeier, Kai and Grimminger, Angela},
  title     = {Changes in the Topical Structure of Explanations Are Related to Explainees' Multimodal Behaviour},
  journal   = {Interaction Studies},
  publisher = {John Benjamins},
  issn      = {1572-0381},
  volume    = {25},
  number    = {3},
  pages     = {257--280},
  year      = {2024},
  doi       = {10.1075/is.23033.laz},
  keywords  = {explanations, multimodal behaviour, elaborations, conditional probabilities},
  abstract  = {Everyday explanations are interactive processes with the aim to provide a less knowledgeable person with reasonable information about other people, objects, or events. Because explanations are interactive communicative processes, the topical structure of an explanation may vary dynamically depending on the immediate feedback of the explainee. In this paper, we analyse topical transitions in medical explanations organised by different physicians (explainers) related to different forms of multimodal behaviour of caregivers (explainees) attending an explanation about the procedures of an upcoming surgery of a child. The analyses reveal that explainees' multimodal behaviour with gaze shifts (and particularly gaze aversion) can predict a transition from an elaborated topic to a new one, whereas explainees' forms of multimodal behaviour with static gaze cannot be related to changes of the topical structure.},
}

@inproceedings{30883,
  author    = {Krings, Sarah Claudia and Yigitbas, Enes and Biermeier, Kai and Engels, Gregor},
  title     = {Design and Evaluation of {AR}-Assisted End-User Robot Path Planning Strategies},
  booktitle = {Proceedings of the 14th ACM SIGCHI Symposium on Engineering Interactive Computing Systems (EICS 2022)},
  year      = {2022},
}

@inproceedings{22285,
  author    = {Biermeier, Kai and Yigitbas, Enes and Weidmann, Nils and Engels, Gregor},
  title     = {Ensuring User Interface Adaptation Consistency through Triple Graph Grammars},
  booktitle = {Proceedings of the International Workshop on Human-Centered Software Engineering for Changing Contexts of Use},
  year      = {2021},
}

@article{15266,
  author        = {Yigitbas, Enes and Jovanovikj, Ivan and Biermeier, Kai and Sauer, Stefan and Engels, Gregor},
  title         = {Integrated Model-Driven Development of Self-Adaptive User Interfaces},
  journal       = {International Journal on Software and Systems Modeling (SoSyM)},
  publisher     = {Springer},
  year          = {2020},
  internal-note = {journal name kept as entered; presumably the Springer journal usually cited as "Software and Systems Modeling" -- verify canonical title, and add volume/pages/doi when known},
}

