@article{63611,
  abstract     = {{When humans interact with artificial intelligence (AI), one desideratum is appropriate trust. Typically, appropriate trust means that humans trust AI except for instances in which they either explicitly notice AI errors or are suspicious that errors could be present. So far, appropriate trust or related notions have mainly been investigated by assessing trust and reliance. In this contribution, we argue that these assessments are insufficient to measure the complex aim of appropriate trust and the related notion of healthy distrust. We introduce and test the perspective of covert visual attention as an additional indicator for appropriate trust and draw conceptual connections to the notion of healthy distrust. To test the validity of our conceptualization, we formalize visual attention using the Theory of Visual Attention and measure its properties that are potentially relevant to appropriate trust and healthy distrust in an image classification task. Based on temporal-order judgment performance, we estimate participants' attentional capacity and attentional weight toward correct and incorrect mock-up AI classifications. We observe that misclassifications reduce attentional capacity compared to correct classifications. However, our results do not indicate that this reduction is beneficial for a subsequent judgment of the classifications. The attentional weighting is not affected by the classifications' correctness but by the difficulty of categorizing the stimuli themselves. We discuss these results, their implications, and the limited potential for using visual attention as an indicator of appropriate trust and healthy distrust.}},
  author       = {{Peters, Tobias Martin and Biermeier, Kai and Scharlau, Ingrid}},
  issn         = {{1664-1078}},
  journal      = {{Frontiers in Psychology}},
  keywords     = {{appropriate trust, healthy distrust, visual attention, Theory of Visual Attention, human-AI interaction, Bayesian cognitive model, image classification}},
  publisher    = {{Frontiers Media SA}},
  title        = {{{Assessing healthy distrust in human-AI interaction: interpreting changes in visual attention}}},
  doi          = {{10.3389/fpsyg.2025.1694367}},
  volume       = {{16}},
  year         = {{2026}},
}
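
The temporal-order judgments in the entry above are analyzed with the Theory of Visual Attention (TVA), in which stimuli race toward encoding with exponential rates derived from an overall capacity C and relative attentional weights w. Below is a minimal Python sketch of that race model, for illustration only; the function and parameter names are assumptions, not the authors' Bayesian estimation code.

# Minimal TVA temporal-order-judgment race model (a sketch, not the
# authors' code; the paper estimates C and w via Bayesian inference).
import math

def p_probe_first(C, w_probe, w_ref, soa):
    """Probability that the probe is encoded before the reference stimulus.

    Encoding times are exponential with rates v = C * w / sum(w);
    soa > 0 means the probe appears first (seconds).
    """
    v_p = C * w_probe / (w_probe + w_ref)   # probe encoding rate (Hz)
    v_r = C * w_ref / (w_probe + w_ref)     # reference encoding rate (Hz)
    if soa >= 0:                            # probe leads by soa
        return 1.0 - math.exp(-v_p * soa) * v_r / (v_p + v_r)
    return math.exp(v_r * soa) * v_p / (v_p + v_r)  # reference leads by |soa|

# Example: capacity 60 Hz, equal weights, probe leading by 50 ms
print(p_probe_first(C=60.0, w_probe=0.5, w_ref=0.5, soa=0.05))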

@inproceedings{64827,
  author       = {{Porwol, Philip Fabian and Körber, Miriam and Kern, Friederike and Schulte, Carsten and Scharlau, Ingrid}},
  booktitle    = {{Proceedings of the 3rd TRR 318 Conference: Contextualizing Explanations}},
  editor       = {{Cimiano, Philip and Paaßen, Benjamin and Vollmer, Anna-Lisa}},
  location     = {{Bielefeld}},
  publisher    = {{Bielefeld University Press}},
  title        = {{{Framing what and how to think: Lay people’s metaphors for algorithms}}},
  doi          = {{10.64136/ubio9074}},
  year         = {{2026}},
}

@inbook{59754,
  author       = {{Scharlau, Ingrid and Seifert, Andreas}},
  booktitle    = {{Psychologiedidaktik an allgemeinbildenden und beruflichen Schulen: Ein Lehrbuch mit Unterrichtsmaterialien}},
  editor       = {{Scharlau, Ingrid and Bender, Elena and Patrzek, Justine and Schreiber, Christine}},
  isbn         = {{978-3-662-69480-0}},
  pages        = {{339--365}},
  publisher    = {{Springer Nature}},
  title        = {{{Empirische Methoden der psychologiedidaktischen Forschung}}},
  year         = {{2025}},
}

@inbook{59752,
  author       = {{Scharlau, Ingrid and Patrzek, Justine and Schreiber, Christine}},
  booktitle    = {{Psychologiedidaktik an allgemeinbildenden und beruflichen Schulen: Ein Lehrbuch mit Unterrichtsmaterialien}},
  editor       = {{Scharlau, Ingrid and Bender, Elena and Patrzek, Justine and Schreiber, Christine}},
  isbn         = {{978-3-662-69480-0}},
  pages        = {{89--118}},
  publisher    = {{Springer Nature}},
  title        = {{{Psychologiedidaktik durch Analyse von Kommunikation}}},
  year         = {{2025}},
}

@inbook{59753,
  author       = {{Scharlau, Ingrid and Schreiber, Christine}},
  booktitle    = {{Psychologiedidaktik an allgemeinbildenden und beruflichen Schulen: Ein Lehrbuch mit Unterrichtsmaterialien}},
  editor       = {{Scharlau, Ingrid and Bender, Elena and Patrzek, Justine and Schreiber, Christine}},
  isbn         = {{978-3-662-69480-0}},
  pages        = {{271--300}},
  publisher    = {{Springer Nature}},
  title        = {{{Schreiben im Psychologieunterricht unterstützen}}},
  year         = {{2025}},
}

@unpublished{59839,
  abstract     = {{In many scientific approaches, especially in those that try to foster explainability of Artificial Intelligences, a narrow conception of explaining prevails. This narrow conception implies that explaining is a one-directional action in which knowledge is transferred from the explainer to an addressee. By studying the amount of agency in metaphors for explaining in scientific texts, we want to find out – or at least to contribute a partial answer to the question – why this narrow conception is so dominant. For our analysis, we use a linguistic conception of agency, transitivity. This concept allows us to specify the degree of agency or effectiveness of the action in a verbalised event. It is defined by several component parts. We detail and discuss both the component parameters of transitivity and global transitivity. Overall, the transitivity of explaining metaphors follows a rather common pattern across metaphors. Agency is not high and is reduced in characteristic aspects: The metaphors imply that the object of explaining is static, i.e., is not changed within the explanation, and that explaining is the activity of one person only. This pattern may account for the narrow conception of explaining. It contrasts strongly with current co-constructive or sociotechnical approaches to explainability.}},
  author       = {{Scharlau, Ingrid and Rohlfing, Katharina J.}},
  publisher    = {{Center for Open Science}},
  title        = {{{Agency in metaphors of explaining: An analysis of scientific texts}}},
  year         = {{2025}},
}
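
The transitivity analysis in the abstract above scores each verbalized event on several component parameters and aggregates them into a global transitivity value. A minimal illustration of such a scoring scheme, assuming binary Hopper-and-Thompson-style parameters (the study's actual parameter set and coding rules are not reproduced here):

# Sketch of parameter-based transitivity scoring (assumed binary
# Hopper & Thompson-style parameters, not the study's coding scheme).
TRANSITIVITY_PARAMETERS = [
    "participants",    # two or more participants involved
    "kinesis",         # an action rather than a state
    "volitionality",   # the agent acts volitionally
    "affectedness",    # the object is affected by the action
    "individuation",   # the object is individuated
]

def global_transitivity(coding):
    """Global transitivity = sum of binary component-parameter scores."""
    return sum(int(coding.get(p, False)) for p in TRANSITIVITY_PARAMETERS)

# Example: an explaining metaphor whose object stays unchanged
coding = {"participants": True, "kinesis": True, "volitionality": True,
          "affectedness": False, "individuation": True}
print(global_transitivity(coding))  # 4 out of 5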

@article{59756,
  abstract     = {{A current concern in the field of Artificial Intelligence (AI) is to ensure the trustworthiness of AI systems. The development of explainability methods is one prominent way to address this, which has often resulted in the assumption that the use of explainability will lead to an increase in the trust of users and wider society. However, the dynamics between explainability and trust are not well established and empirical investigations of their relation remain mixed or inconclusive.
In this paper we provide a detailed description of the concepts of user trust and distrust in AI and their relation to appropriate reliance. For that we draw from the fields of machine learning, human–computer interaction, and the social sciences. Based on these insights, we have conducted a focused survey of existing empirical studies that investigate the effects of AI systems and XAI methods on user (dis)trust, in order to substantiate our conceptualization of trust, distrust, and reliance. With respect to our conceptual understanding we identify gaps in existing empirical work. By clarifying the concepts and summarizing the empirical studies, we aim to provide researchers who examine user trust in AI with an improved starting point for developing user studies to measure and evaluate the user’s attitude towards and reliance on AI systems.}},
  author       = {{Visser, Roel and Peters, Tobias Martin and Scharlau, Ingrid and Hammer, Barbara}},
  issn         = {{1389-0417}},
  journal      = {{Cognitive Systems Research}},
  keywords     = {{XAI, Appropriate trust, Distrust, Reliance, Human-centric evaluation, Trustworthy AI}},
  publisher    = {{Elsevier BV}},
  title        = {{{Trust, distrust, and appropriate reliance in (X)AI: A conceptual clarification of user trust and survey of its empirical evaluation}}},
  doi          = {{10.1016/j.cogsys.2025.101357}},
  year         = {{2025}},
}

@misc{59922,
  author       = {{Porwol, Philip and Scharlau, Ingrid}},
  publisher    = {{OSF}},
  title        = {{{An annotated corpus of elicited metaphors of explaining and understanding using MIPVU}}},
  doi          = {{10.17605/OSF.IO/Y6SMX}},
  year         = {{2025}},
}

@article{59755,
  abstract     = {{Due to the application of Artificial Intelligence (AI) in high-risk domains like law or medicine, trustworthy AI and trust in AI are of increasing scientific and public relevance. A typical conception, for example in the context of medical diagnosis, is that a knowledgeable user receives AI-generated classification as advice. Research to improve such interactions often aims to foster the user’s trust, which in turn should improve the combined human-AI performance. Given that AI models can err, we argue that the possibility to critically review, thus to distrust, an AI decision is an equally interesting target of research.
We created two image classification scenarios in which the participants received mock-up AI advice. The quality of the advice decreases during one phase of the experiment. We studied the task performance, trust, and distrust of the participants, and tested whether an instruction to remain skeptical and review each piece of advice led to a better performance compared to a neutral condition. Our results indicate that this instruction does not improve but rather worsens the participants’ performance. Repeated single-item self-report of trust and distrust shows an increase in trust and a decrease in distrust after the drop in the AI’s classification quality, with no difference between the two instructions. Furthermore, via a Bayesian Signal Detection Theory analysis, we provide a procedure to assess appropriate reliance in detail, by quantifying whether the problems of under- and over-reliance have been mitigated. We discuss implications of our results for the usage of disclaimers before interacting with AI, as prominently used in current LLM-based chatbots, and for trust and distrust research.}},
  author       = {{Peters, Tobias Martin and Scharlau, Ingrid}},
  journal      = {{Frontiers in Psychology}},
  keywords     = {{trust in AI, trust, distrust, human-AI interaction, Signal Detection Theory, Bayesian parameter estimation, image classification}},
  title        = {{{Interacting with fallible AI: Is distrust helpful when receiving AI misclassifications?}}},
  doi          = {{10.3389/fpsyg.2025.1574809}},
  volume       = {{16}},
  year         = {{2025}},
}
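
The Bayesian Signal Detection Theory analysis mentioned in the entry above quantifies sensitivity and response bias from hit and false-alarm data. Below is a minimal sketch of that idea, assuming an equal-variance Gaussian SDT model with Beta posteriors over the hit and false-alarm rates; the counts and priors are hypothetical and this is not the paper's actual model.

# Minimal Bayesian SDT sketch (equal-variance Gaussian model with
# Beta(1, 1) priors; hypothetical counts, not the paper's model).
import numpy as np
from scipy.stats import beta, norm

rng = np.random.default_rng(0)
hits, n_signal = 42, 50   # hypothetical hits out of signal trials
fas, n_noise = 12, 50     # hypothetical false alarms out of noise trials

# Posterior samples of hit and false-alarm rates
h = beta.rvs(hits + 1, n_signal - hits + 1, size=10_000, random_state=rng)
f = beta.rvs(fas + 1, n_noise - fas + 1, size=10_000, random_state=rng)

# Probit transform yields posterior samples of sensitivity and bias
d_prime = norm.ppf(h) - norm.ppf(f)             # sensitivity d'
criterion = -0.5 * (norm.ppf(h) + norm.ppf(f))  # response criterion c

lo, hi = np.percentile(d_prime, [2.5, 97.5])
print(f"d': {d_prime.mean():.2f} [{lo:.2f}, {hi:.2f}], c: {criterion.mean():.2f}")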

@unpublished{61119,
  abstract     = {{The present article offers an assessment of intra-individual variability in visual attention using the Theory of Visual Attention, which provides a formal framework for quantifying attentional components. We specifically investigated overall attentional capacity – that is, the available processing speed – and its distribution, the relative attentional weight. By reanalyzing a large existing dataset from Tünnermann and Scharlau (2021), we found that across multiple testing days, participants either remained stable within a 20 Hz margin or showed consistent improvements in capacity – in some cases tripling their initial capacity. The weights in response to salient stimuli were remarkably consistent. To determine whether increases in capacity reflect pure test-retest effects or are facilitated by consolidation between days, and to quantify within-day variability, we conducted a second study in which participants completed five self-administered sessions within a single day. Capacities remained within the same magnitude and did not show a consistent directional trend. The relative weights exhibited comparatively little variation in most participants, akin to the previously analyzed dataset. Further, estimation uncertainty increased with higher capacity values. These results suggest that capacity may be subject to training effects, but that such improvements appear to depend on longer breaks between sessions. This has important implications for individualized assessment: A personal prior could be estimated from a single session to accelerate future estimations, as long as subsequent sessions occur on the same day. Participants with higher capacities may require tailored experimentation methods when small to medium effects are of interest, due to increased uncertainty.}},
  author       = {{Banh, Ngoc Chi and Scharlau, Ingrid}},
  publisher    = {{Center for Open Science}},
  title        = {{{Intra-individual variability in TVA attentional capacity and weight distribution: A reanalysis across days and an experiment within-day}}},
  year         = {{2025}},
}
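
The "personal prior" suggested in the abstract above amounts to carrying the capacity posterior from one session forward as the prior for the next. A minimal conjugate sketch of that updating step, assuming the capacity posterior is approximated as a Gaussian (hypothetical numbers, not the authors' TVA fitting procedure):

# Sketch of reusing a session-1 capacity estimate as a personal prior
# (normal-normal conjugate update; assumed Gaussian approximation).
def update_normal(prior_mu, prior_sd, obs_mu, obs_sd):
    """Posterior mean/sd of a normal mean after one noisy observation."""
    prior_prec, obs_prec = prior_sd**-2, obs_sd**-2
    post_prec = prior_prec + obs_prec
    post_mu = (prior_prec * prior_mu + obs_prec * obs_mu) / post_prec
    return post_mu, post_prec**-0.5

# Session 1 gave C = 55 Hz (sd 8); a short session 2 estimate: 60 Hz (sd 12)
mu, sd = update_normal(prior_mu=55.0, prior_sd=8.0, obs_mu=60.0, obs_sd=12.0)
print(f"updated capacity: {mu:.1f} Hz (sd {sd:.1f})")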

@misc{59921,
  author       = {{Scharlau, Ingrid and Körber, Miriam}},
  publisher    = {{OSF}},
  title        = {{{Metaphors in 24 WIRED Level 5 Videos (Data corpus)}}},
  doi          = {{10.17605/OSF.IO/94A2J}},
  year         = {{2025}},
}

@article{61244,
  abstract     = {{Explanations play a crucial role in knowledge transfer and meaning-making and are often described as a co-constructive process in which multiple agents collaboratively shape understanding. However, the metaphors used to conceptualize explaining may influence how this process is framed. This study investigates the extent to which the co-constructive nature of explaining is represented in explaining metaphors. Using a systematic analysis of agency, we examined how these metaphors depict the explanation process and the roles of the agents involved. We found that explaining metaphors lack collaboration between explainer and addressee, constructiveness of the process, as well as bidirectionality and iterativeness. In light of current research on metaphorical framing, the study thus highlights the risk that such explaining metaphors may reinforce a non-co-constructive perspective on explaining and a top-down approach in the development of AI systems as well as other areas.}},
  author       = {{Porwol, Philip Fabian and Scharlau, Ingrid}},
  journal      = {{Frontiers in Psychology}},
  title        = {{{Is explaining more like showing or more like building? Agency in metaphors of explaining}}},
  doi          = {{10.3389/fpsyg.2025.1628706}},
  year         = {{2025}},
}

@article{62165,
  abstract     = {{Academic publishing is both an indication of scientific contribution and a currency for career advancement. This dual role gives rise to a normative scientific conflict: Does the structural incentive to publish constitute a conflict of interest (COI) that ought to be disclosed? In this paper, we address this conflict through an action research approach, engaging collaboratively and reflexively to answer four related questions: (1) What evidence suggests that researchers face a (financial) COI when publishing? (2) What are the benefits and drawbacks of explicitly acknowledging that publications function as academic currency? (3) How should such conflicts be disclosed? (4) Do mechanisms such as pre-registration and registered reports resolve these concerns? This paper contends that while researchers are clearly incentivised to publish, this interest need not necessarily constitute a conflict or be explicitly disclosed. Treating this issue as a normative scientific conflict does reveal the need for a shift in how researchers understand and navigate the subjective, self-interested dimensions of their work. We propose four key responses: (1) integrating discussions of COIs and biases more extensively into undergraduate science education, (2) promoting greater reflexivity in everyday research practice (e.g., through reflexivity journals, peer-led audit groups, and the reintegration of discussions on the historicity and cultural nature of research into scientific publications), (3) critically investigating institutional incentives and journal policies, and (4) proactively adopting methodological safeguards such as pre-registration. By addressing this conflict through action research, we demonstrate how normative tensions in science can be made productive — supporting both critical reflection and structural improvement.}},
  author       = {{Acem, Ensar and Aczel, Balazs and Albayrak, Nihan and Brown, Nicholas J. L. and Dudda, Leonie A. and Elsherif, Mahmoud Medhat and Gjoneska, Biljana and Kowal, Marta and Krishna, Anand and Miłkoś, Szymon and Paruzel-Czachura, Mariola and Patel, Jay and Pypno-Blajda, Katarzyna and Scharlau, Ingrid and Verheyen, Steven and Zubaly, Benjamin}},
  issn         = {{0304-2421}},
  journal      = {{Theory and Society}},
  publisher    = {{Springer Science and Business Media LLC}},
  title        = {{{Why I declare a conflict of interest and you should not}}},
  doi          = {{10.1007/s11186-025-09641-3}},
  year         = {{2025}},
}

@article{61944,
  abstract     = {{This article describes a new student-based approach to Decoding and Disrupting. This approach consists of an educational programme that works with a multidisciplinary group of undergraduate students, so-called Culture Fellows. In the programme, the students learn about theories on disciplinary cultures and their practices, Decoding the Disciplines, and communication. The programme focuses on the implicitness of disciplinary practices and how to explore them, i.e., it seeks to address the importance of making disciplinary practices, norms, and values more accessible to students. Within the programme, the Culture Fellows use a student-based variant of the Decoding the Disciplines Paradigm that we call ‘Culture Quest’. The Culture Quest supplies students with the tools to question, explore, and decode their respective disciplines and practices. Disciplinary practices often remain implicit because lecturers, with their wealth of experience and expertise in their discipline, may be operationally blind and thus no longer aware of the fact that certain practices might need to be explained. This lack of explanation or explicitness can lead to students feeling bewildered, confused or puzzled. The Culture Fellows and in particular the Culture Quest can encourage dialogue about and exploration of disciplinary cultures and their practices and can thus allow students to grasp the implicit cultural norms and expectations within their respective disciplines more clearly. The Culture Fellows and the Culture Quest provide students – and maybe even lecturers – with opportunities to engage with and reflect on teaching and learning strategies and practices.}},
  author       = {{Schmöckel, Sabrina and Scharlau, Ingrid}},
  issn         = {{1918-0853}},
  journal      = {{Transformative Dialogues: Teaching and Learning Journal}},
  number       = {{3}},
  pages        = {{98--115}},
  publisher    = {{Penn State University Libraries}},
  title        = {{{No Student Should Be an Island: A Peer-Approach to Decoding and Disrupting}}},
  doi          = {{10.59236/td2025vol18iss31913}},
  volume       = {{18}},
  year         = {{2025}},
}

@article{62204,
  author       = {{Schmöckel, Sabrina}},
  journal      = {{die hochschullehre}},
  title        = {{{Hör mal, wer da spricht – Eine dialogische Perspektive auf studentische Äußerungen zu Fachkulturen}}},
  year         = {{2025}},
}

@unpublished{59917,
  abstract     = {{Under the slogan of trustworthy AI, much of contemporary AI research is focused on designing AI systems and usage practices that inspire human trust and, thus, enhance adoption of AI systems. However, a person affected by an AI system may not be convinced by AI system design alone---nor should they be, if the AI system is embedded in a social context that gives good reason to believe that it is used in tension with a person’s interest. In such cases, distrust in the system may be justified and necessary to build meaningful trust in the first place. We propose the term \emph{healthy distrust} to describe such a justified, careful stance towards certain AI usage practices. We investigate prior notions of trust and distrust in computer science, sociology, history, psychology, and philosophy, outline a remaining gap that healthy distrust might fill, and conceptualize healthy distrust as a crucial part of AI usage that respects human autonomy.}},
  author       = {{Paaßen, Benjamin and Alpsancar, Suzana and Matzner, Tobias and Scharlau, Ingrid}},
  publisher    = {{arXiv}},
  title        = {{{Healthy Distrust in AI systems}}},
  year         = {{2025}},
}

@article{62932,
  abstract     = {{Many previous studies on the conceptual function of metaphors have focused on their function of highlighting aspects of target concepts. From the beginning of this research, it was known that conceptual metaphors also hide aspects of the target concept; however, this aspect has been less studied. This study builds upon the idea that the hiding aspect of a specific metaphor should be identified in relation to other metaphors for the same concept. A method is presented to detail this relation based on the theory of semantic frames and the FrameNet resource, to identify the hidden aspects, and to apply it to a corpus of 298 elicited metaphor texts on the target concept of understanding. The analysis revealed that certain conceptual aspects are consistently hidden by a majority of metaphors, pointing to patterns in conceptualization. Using this approach, six aspects frequently hidden by metaphors were identified: Sociality, Transfer, Ownership, Perception, Foundation, and Duration.}},
  author       = {{Porwol, Philip Fabian and Scharlau, Ingrid}},
  journal      = {{STUDIA NEOFILOLOGICZNE: ROZPRAWY JĘZYKOZNAWCZE (Modern Language Studies: Linguistic Essays)}},
  pages        = {{181--198}},
  publisher    = {{Uniwersytet Jana Długosza w Częstochowie}},
  title        = {{{What do metaphors of understanding hide?}}},
  volume       = {{XXI}},
  year         = {{2025}},
}
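
The hiding analysis in the entry above treats an aspect as hidden when most metaphors for understanding do not evoke it. A minimal sketch of that majority criterion over frame annotations (toy data; the paper's FrameNet-based corpus of 298 texts is far richer):

# Sketch of the majority-based 'hidden aspect' criterion
# (toy annotations, not the study's FrameNet data).
corpus = {
    "understanding is grasping": {"Perception", "Ownership"},
    "understanding is seeing":   {"Perception"},
    "understanding is building": {"Foundation", "Duration"},
    "understanding is arriving": {"Duration"},
}
aspects = {"Sociality", "Transfer", "Ownership",
           "Perception", "Foundation", "Duration"}

# An aspect is hidden if fewer than half of the metaphors evoke it.
hidden = {a for a in aspects
          if sum(a in frames for frames in corpus.values()) < len(corpus) / 2}
print(sorted(hidden))  # ['Foundation', 'Ownership', 'Sociality', 'Transfer']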

@inbook{59742,
  author       = {{Scharlau, Ingrid and Jenert, Tobias}},
  booktitle    = {{Psychologiedidaktik an allgemeinbildenden und beruflichen Schulen: Ein Lehrbuch mit Unterrichtsmaterialien}},
  editor       = {{Scharlau, Ingrid and Bender, Elena and Patrzek, Justine and Schreiber, Christine}},
  isbn         = {{978-3-662-69480-0}},
  pages        = {{1--17}},
  publisher    = {{Springer Nature}},
  title        = {{{Didaktische Überlegungen in der Psychologie}}},
  year         = {{2025}},
}

@inproceedings{53069,
  author       = {{Banh, Ngoc Chi and Scharlau, Ingrid}},
  location     = {{Regensburg}},
  title        = {{{Effects of task difficulty on visual processing speed}}},
  year         = {{2024}},
}

@inproceedings{54889,
  abstract     = {{To reach the goal of zero traffic fatalities a year, one building block is the proposition to develop advanced assistance systems for vulnerable road users (VRUs) such as bicyclists. We focus on the dooring problem, i.e., car doors being opened inattentively in the way of an approaching cyclist. We extended our vehicle to everything (V2X) communication-enabled virtual cycling environment for dooring experiments. Our system extends toolkits that are widely used in the V2X research community. We showcase how such a system may be used to realize and evaluate distributed algorithms for VRU safety solutions such as dooring prevention.}},
  author       = {{Stratmann, Lukas and Banh, Ngoc Chi and Scharlau, Ingrid and Dressler, Falko}},
  booktitle    = {{ACM Symposium on Principles of Distributed Computing (PODC 2024), Advanced tools, programming languages, and PLatforms for Implementing and Evaluating algorithms for Distributed systems (ApPLIED 2024)}},
  publisher    = {{Association for Computing Machinery (ACM)}},
  title        = {{{Safety Assistance Systems for Bicyclists: Toward Empirical Studies of the Dooring Problem}}},
  doi          = {{10.1145/3663338.3665831}},
  year         = {{2024}},
}