@article{63611,
  abstract     = {{When humans interact with artificial intelligence (AI), one desideratum is appropriate trust. Typically, appropriate trust encompasses that humans trust AI except for instances in which they either explicitly notice AI errors or are suspicious that errors could be present. So far, appropriate trust or related notions have mainly been investigated by assessing trust and reliance. In this contribution, we argue that these assessments are insufficient to measure the complex aim of appropriate trust and the related notion of healthy distrust. We introduce and test the perspective of covert visual attention as an additional indicator for appropriate trust and draw conceptual connections to the notion of healthy distrust. To test the validity of our conceptualization, we formalize visual attention using the Theory of Visual Attention and measure its properties that are potentially relevant to appropriate trust and healthy distrust in an image classification task. Based on temporal-order judgment performance, we estimate participants' attentional capacity and attentional weight toward correct and incorrect mock-up AI classifications. We observe that misclassifications reduce attentional capacity compared to correct classifications. However, our results do not indicate that this reduction is beneficial for a subsequent judgment of the classifications. The attentional weighting is not affected by the classifications' correctness but by the difficulty of categorizing the stimuli themselves. We discuss these results, their implications, and the limited potential for using visual attention as an indicator of appropriate trust and healthy distrust.}},
  author       = {{Peters, Tobias Martin and Biermeier, Kai and Scharlau, Ingrid}},
  issn         = {{1664-1078}},
  journal      = {{Frontiers in Psychology}},
  keywords     = {{appropriate trust, healthy distrust, visual attention, Theory of Visual Attention, human-AI interaction, Bayesian cognitive model, image classification}},
  publisher    = {{Frontiers Media SA}},
  title        = {{{Assessing healthy distrust in human-AI interaction: interpreting changes in visual attention}}},
  doi          = {{10.3389/fpsyg.2025.1694367}},
  volume       = {{16}},
  year         = {{2026}},
}

@techreport{65021,
  abstract     = {{Several early music projects, such as the Stanford Josquin Project, have demonstrated the potential for attaining valuable new musicological insights using a corpus-based approach. However, the available musical corpora tend to be relatively small and exhibit considerable variation in encoding practices. Aspiring corpus researchers are confronted with a lack of suitable data, which needs to be addressed before they can embark on their proper research. The EarlyMuse Short Term Scientific Mission CORSICA has surveyed the current state of corpus creation and digital editing in early music. Based on this information, it has developed a vision for the future of corpus building in this field, which aims to speed up the production of digital encodings while respecting the autonomy of the encoders and acknowledging their efforts. This is important because much high-quality encoding is carried out outside the field of professional musicology, and engaging citizen scientists could help address the current shortage of research data. The CORSICA team’s vision is informed not only by a study of the available data, standards and technologies, but also by Human-Computer Interaction, placing human goals and values before the creation of technology and work processes. The core of the vision is that successful corpus creation must be an inclusive endeavour in terms of both technology and human participation. The report concludes with an implementation plan outlining the initial steps required to realise the vision.}},
  author       = {{Wiering, Frans and Bergwall, Erik and van Berchum, Marnix and Goebl, Werner and Van Kranenburg, Peter and Lewis, David and Plaksin, Anna Viktoria Katrin and Rodríguez-García, Esperanza and Smith, David J. and Visscher, Mirjam and Weigl, David M.}},
  keywords     = {{citizen science, crowdsourcing, digital editions of music, early music, human computer interaction, music corpora, music encoding, musicology}},
  title        = {{{Making Corpus Creation in Early Music Rewarding and Effective: Finding the Optimum Between Standardisation and Autonomy}}},
  doi          = {{10.5281/zenodo.18413961}},
  year         = {{2026}},
}

@article{59755,
  abstract     = {{Due to the application of Artificial Intelligence (AI) in high-risk domains like law or medicine,
trustworthy AI and trust in AI are of increasing scientific and public relevance. A typical conception,
for example in the context of medical diagnosis, is that a knowledgeable user receives an AI-generated
classification as advice. Research to improve such interactions often aims to foster the
user’s trust, which in turn should improve the combined human-AI performance. Given that AI
models can err, we argue that the possibility to critically review, thus to distrust, an AI decision is
an equally interesting target of research.
We created two image classification scenarios in which the participants received mock-up
AI advice. The quality of the advice decreases during one phase of the experiment. We studied the
task performance, trust and distrust of the participants, and tested whether an instruction to
remain skeptical and review each piece of advice led to a better performance compared to a
neutral condition. Our results indicate that this instruction does not improve but rather worsens
the participants’ performance. Repeated single-item self-report of trust and distrust shows an
increase in trust and a decrease in distrust after the drop in the AI’s classification quality, with no
difference between the two instructions. Furthermore, via a Bayesian Signal Detection Theory
analysis, we provide a procedure to assess appropriate reliance in detail, by quantifying whether
the problems of under- and over-reliance have been mitigated. We discuss implications of our
results for the usage of disclaimers before interacting with AI, as prominently used in current
LLM-based chatbots, and for trust and distrust research.}},
  author       = {{Peters, Tobias Martin and Scharlau, Ingrid}},
  journal      = {{Frontiers in Psychology}},
  keywords     = {{trust in AI, trust, distrust, human-AI interaction, Signal Detection Theory, Bayesian parameter estimation, image classification}},
  title        = {{{Interacting with fallible AI: Is distrust helpful when receiving AI misclassifications?}}},
  doi          = {{10.3389/fpsyg.2025.1574809}},
  volume       = {{16}},
  year         = {{2025}},
}

@inproceedings{61149,
  abstract     = {{The use of continuous fiber-reinforced thermoplastics (FRTP) in the automotive industry increases due to their excellent material properties and the possibility of rapid processing. The scale-spanning heterogeneity of their material structure and its influence on the material behavior, however, presents significant challenges for most joining technologies, such as self-piercing riveting (SPR). During mechanical joining, the material structure is significantly altered within and around the joining zone, heavily influencing the material behavior. A comprehensive understanding of the underlying phenomena of material alteration during the SPR process is essential as a basis for validating numerical simulations. This study examines the material structure at ten stages of a step-setting test of SPR with two FRTP sheets with glass-fiber reinforcement. Utilizing X-ray computed tomography (CT), the damage phenomena within different areas of the setting test are analyzed three-dimensionally and key parameters are quantified. The dominant phenomena during the penetration of the rivet into the laminate are fiber failure (FF), interfiber failure (IFF) and fiber bending, while delamination, fiber kinking and roving splitting are also observed. At the final stages, the bottom layers of the second sheet collapse and form a bulge into the cavity of the die.}},
  author       = {{Dargel, Alrik and Gröger, Benjamin and Schlichter, Malte Christian and Gerritzen, Johannes and Köhler, Daniel and Meschut, Gerson and Gude, Maik and Kupfer, Robert}},
  booktitle    = {{Proceedings of the 8th International Conference on Integrity-Reliability-Failure (IRF2025)}},
  editor       = {{Gomes, J.F. Silva and Meguid, Shaker A.}},
  isbn         = {{9789727523238}},
  keywords     = {{self-piercing riveting, computed tomography, thermoplastic composites, process-structure-interaction}},
  location     = {{Porto}},
  publisher    = {{FEUP}},
  title        = {{{Local Deformation and Failure of Composites During Self-Piercing Riveting: A CT-Based Microstructure Investigation}}},
  doi          = {{10.24840/978-972-752-323-8}},
  year         = {{2025}},
}

@article{55400,
  abstract     = {{This study contributes to the evolving field of robot learning in interaction
with humans, examining the impact of diverse input modalities on learning
outcomes. It introduces the concept of "meta-modalities" which encapsulate
additional forms of feedback beyond the traditional preference and scalar
feedback mechanisms. Unlike prior research that focused on individual
meta-modalities, this work evaluates their combined effect on learning
outcomes. Through a study with human participants, we explore user preferences
for these modalities and their impact on robot learning performance. Our
findings reveal that while individual modalities are perceived differently,
their combination significantly improves learning behavior and usability. This
research not only provides valuable insights into the optimization of
human-robot interactive task learning but also opens new avenues for enhancing
the interactive freedom and scaffolding capabilities provided to users in such
settings.}},
  author       = {{Beierling, Helen and Beierling, Robin and Vollmer, Anna-Lisa}},
  journal      = {{Frontiers in Robotics and AI}},
  keywords     = {{human-robot interaction, human-in-the-loop learning, reinforcement learning, interactive robot learning, multi-modal feedback, learning from demonstration, preference-based learning, scaffolding in robot learning}},
  publisher    = {{Frontiers Media SA}},
  title        = {{{The power of combined modalities in interactive robot learning}}},
  volume       = {{12}},
  year         = {{2025}},
}

@article{61327,
  abstract     = {{Robot learning from humans has been proposed and researched for several decades as a means to enable robots to learn new skills or
adapt existing ones to new situations. Recent advances in artificial intelligence, including learning approaches like reinforcement
learning and architectures like transformers and foundation models, combined with access to massive datasets, have created attractive
opportunities to apply those data-hungry techniques to this problem. We argue that the focus on massive amounts of pre-collected
data, and the resulting learning paradigm, where humans demonstrate and robots learn in isolation, is overshadowing a specialized
area of work we term Human-Interactive-Robot-Learning (HIRL). This paradigm, wherein robots and humans interact during the
learning process, is at the intersection of multiple fields (artificial intelligence, robotics, human-computer interaction, design and others)
and holds unique promise. Using HIRL, robots can achieve greater sample efficiency (as humans can provide task knowledge through
interaction), align with human preferences (as humans can guide the robot behavior towards their expectations), and explore more
meaningfully and safely (as humans can utilize domain knowledge to guide learning and prevent catastrophic failures). This can result
in robotic systems that can more quickly and easily adapt to new tasks in human environments. The objective of this paper is to
provide a broad and consistent overview of HIRL research and to guide researchers toward understanding the scope of HIRL, and
current open or underexplored challenges related to four themes — namely, human, robot learning, interaction, and broader context.
The paper includes concrete use cases to illustrate the interaction between these challenges and inspire further research according to
broad recommendations and a call for action for the growing HIRL community.}},
  author       = {{Baraka, Kim and Idrees, Ifrah and Faulkner, Taylor Kessler and Biyik, Erdem and Booth, Serena and Chetouani, Mohamed and Grollman, Daniel H. and Saran, Akanksha and Senft, Emmanuel and Tulli, Silvia and Vollmer, Anna-Lisa and Andriella, Antonio and Beierling, Helen and Horter, Tiffany and Kober, Jens and Sheidlower, Isaac and Taylor, Matthew E. and van Waveren, Sanne and Xiao, Xuesu}},
  journal      = {{Transactions on Human-Robot Interaction}},
  keywords     = {{Robot learning, Interactive learning systems, Human-robot interaction, Human-in-the-loop machine learning, Teaching and learning}},
  title        = {{{Human-Interactive Robot Learning: Definition, Challenges, and Recommendations}}},
  year         = {{2025}},
}

@article{61339,
  author       = {{Protte, Marius and Djawadi, Behnud Mir}},
  journal      = {{Frontiers in Behavioral Economics}},
  keywords     = {{cheating, human-machine interaction, ambiguity, verification process, algorithm aversion, algorithm appreciation}},
  pages        = {{1645749}},
  title        = {{{Human vs. Algorithmic Auditors: The Impact of Entity Type and Ambiguity on Human Dishonesty}}},
  doi          = {{10.3389/frbhe.2025.1645749}},
  volume       = {{4}},
  year         = {{2025}},
}

@inproceedings{62116,
  author       = {{Böer, Nils Tobias and Güldenpenning, Iris and Weigelt, Matthias}},
  booktitle    = {{57th Herbsttreffen der experimentellen Kognitionspsychologie (HExKoP)}},
  keywords     = {{Deception, Sport Psychology, Social Interaction}},
  location     = {{Trier}},
  title        = {{{The mere presence of a social partner modulates fake-production costs}}},
  year         = {{2025}},
}

@article{44639,
  author       = {{Hoppe, Julia Amelie and Tuisku, Outi and Johansson-Pajala, Rose-Marie and Pekkarinen, Satu and Hennala, Lea and Gustafsson, Christine and Melkas, Helinä and Thommes, Kirsten}},
  issn         = {{2451-9588}},
  journal      = {{Computers in Human Behavior Reports}},
  keywords     = {{Artificial Intelligence, Cognitive Neuroscience, Computer Science Applications, Human-Computer Interaction, Applied Psychology, Neuroscience (miscellaneous)}},
  publisher    = {{Elsevier BV}},
  title        = {{{When do individuals choose care robots over a human caregiver? Insights from a laboratory experiment on choices under uncertainty}}},
  doi          = {{10.1016/j.chbr.2022.100258}},
  volume       = {{9}},
  year         = {{2023}},
}

@inproceedings{50118,
  abstract     = {{Despite the widespread use of machine learning algorithms, their effectiveness is limited by a phenomenon known as algorithm aversion. Recent research concluded that unobserved variables can cause algorithm aversion. However, the impact of an unobserved variable on algorithm aversion remains unclear. Previous studies focused on situations where humans had more variables available than algorithms. We extend this research by conducting an online experiment with 94 participants, systematically varying the number of observable variables to the advisor and the advisor type. Surprisingly, our results did not confirm that an unobserved variable had a negative effect on advice-taking. Instead, we found a positive impact in an algorithm appreciation scenario. This study provides new insights into the paradoxical behavior in which people weigh advice more despite having fewer variables, as they correct for the advisor's errors. Practitioners should consider this behavior when designing algorithms and account for user correction behavior.}},
  author       = {{Leffrang, Dirk}},
  booktitle    = {{Wirtschaftsinformatik Conference}},
  keywords     = {{Algorithm aversion, Data, Decision-making, Advice-taking, Human-Computer Interaction}},
  location     = {{Paderborn}},
  number       = {{19}},
  title        = {{{The Broken Leg of Algorithm Appreciation: An Experimental Study on the Effect of Unobserved Variables on Advice Utilization}}},
  year         = {{2023}},
}

@article{51371,
  abstract     = {{In this paper, we investigate the effect of distractions and hesitations as a scaffolding strategy. Recent research points to the potential beneficial effects of a speaker’s hesitations on the listeners’ comprehension of utterances, although results from studies on this issue indicate that humans do not make strategic use of them. The role of hesitations and their communicative function in human-human interaction is a much-discussed topic in current research. To better understand the underlying cognitive processes, we developed a human–robot interaction (HRI) setup that allows the measurement of the electroencephalogram (EEG) signals of a human participant while interacting with a robot. We thereby address the research question of whether we find effects on single-trial EEG based on the distraction and the corresponding robot’s hesitation scaffolding strategy. To carry out the experiments, we leverage our LabLinking method, which enables interdisciplinary joint research between remote labs. This study could not have been conducted without LabLinking, as the two involved labs needed to combine their individual expertise and equipment to achieve the goal together. The results of our study indicate that the EEG correlates in the distracted condition are different from the baseline condition without distractions. Furthermore, we could differentiate the EEG correlates of distraction with and without a hesitation scaffolding strategy. This proof-of-concept study shows that LabLinking makes it possible to conduct collaborative HRI studies in remote laboratories and lays the first foundation for more in-depth research into robotic scaffolding strategies.}},
  author       = {{Richter, Birte and Putze, Felix and Ivucic, Gabriel and Brandt, Mara and Schütze, Christian and Reisenhofer, Rafael and Wrede, Britta and Schultz, Tanja}},
  issn         = {{2414-4088}},
  journal      = {{Multimodal Technologies and Interaction}},
  keywords     = {{Computer Networks and Communications, Computer Science Applications, Human-Computer Interaction, Neuroscience (miscellaneous)}},
  number       = {{4}},
  publisher    = {{MDPI AG}},
  title        = {{{EEG Correlates of Distractions and Hesitations in Human–Robot Interaction: A LabLinking Pilot Study}}},
  doi          = {{10.3390/mti7040037}},
  volume       = {{7}},
  year         = {{2023}},
}

@article{43437,
  abstract     = {{In virtual reality (VR), participants may not always have hands, bodies, eyes, or even voices—using VR helmets and two controllers, participants control an avatar through virtual worlds that do not necessarily obey familiar laws of physics; moreover, the avatar’s bodily characteristics may not neatly match our bodies in the physical world. Despite these limitations and specificities, humans get things done through collaboration and the creative use of the environment. While multiuser interactive VR is attracting greater numbers of participants, there are currently few attempts to analyze the in situ interaction systematically. This paper proposes a video-analytic detail-oriented methodological framework for studying virtual reality interaction. Using multimodal conversation analysis, the paper investigates a nonverbal, embodied, two-person interaction: two players in a survival game strive to gesturally resolve a misunderstanding regarding an in-game mechanic—however, both of their microphones are turned off for the duration of play. The players’ inability to resort to complex language to resolve this issue results in a dense sequence of back-and-forth activity involving gestures, object manipulation, gaze, and body work. Most crucially, timing and modified repetitions of previously produced actions turn out to be the key to overcome both technical and communicative challenges. The paper analyzes these action sequences, demonstrates how they generate intended outcomes, and proposes a vocabulary to speak about these types of interaction more generally. The findings demonstrate the viability of multimodal analysis of VR interaction, shed light on unique challenges of analyzing interaction in virtual reality, and generate broader methodological insights about the study of nonverbal action.}},
  author       = {{Klowait, Nils}},
  issn         = {{2578-1863}},
  journal      = {{Human Behavior and Emerging Technologies}},
  keywords     = {{Human-Computer Interaction, General Social Sciences, Social Psychology, Virtual Reality, Multimodality, Nonverbal Interaction, Search Sequence, Gesture, Co-Operative Action, Goodwin, Ethnomethodology}},
  pages        = {{1--15}},
  publisher    = {{Hindawi Limited}},
  title        = {{{On the Multimodal Resolution of a Search Sequence in Virtual Reality}}},
  doi          = {{10.1155/2023/8417012}},
  volume       = {{2023}},
  year         = {{2023}},
}

@article{48543,
  abstract     = {{Explanation has been identified as an important capability for AI-based systems, but research on systematic strategies for achieving understanding in interaction with such systems is still sparse. Negation is a linguistic strategy that is often used in explanations. It creates a contrast space between the affirmed and the negated item that enriches explaining processes with additional contextual information. While negation in human speech has been shown to lead to higher processing costs and worse task performance in terms of recall or action execution when used in isolation, it can decrease processing costs when used in context. So far, it has not been considered as a guiding strategy for explanations in human-robot interaction. We conducted an empirical study to investigate the use of negation as a guiding strategy in explanatory human-robot dialogue, in which a virtual robot explains tasks and possible actions to a human explainee to solve them in terms of gestures on a touchscreen. Our results show that negation vs. affirmation 1) increases processing costs measured as reaction time and 2) increases several aspects of task performance. While there was no significant effect of negation on the number of initially correctly executed gestures, we found a significantly lower number of attempts—measured as breaks in the finger movement data before the correct gesture was carried out—when being instructed through a negation. We further found that the gestures significantly resembled the presented prototype gesture more following an instruction with a negation as opposed to an affirmation. Also, the participants rated the benefit of contrastive vs. affirmative explanations significantly higher. Repeating the instructions decreased the effects of negation, yielding similar processing costs and task performance measures for negation and affirmation after several iterations. We discuss our results with respect to possible effects of negation on linguistic processing of explanations and limitations of our study.}},
  author       = {{Groß, André and Singh, Amit and Banh, Ngoc Chi and Richter, Birte and Scharlau, Ingrid and Rohlfing, Katharina J. and Wrede, Britta}},
  journal      = {{Frontiers in Robotics and AI}},
  keywords     = {{HRI, XAI, negation, understanding, explaining, touch interaction, gesture}},
  title        = {{{Scaffolding the human partner by contrastive guidance in an explanatory human-robot dialogue}}},
  doi          = {{10.3389/frobt.2023.1236184}},
  volume       = {{10}},
  year         = {{2023}},
}

@article{32266,
  author       = {{Hoppe, Julia Amelie and Melkas, Helinä and Pekkarinen, Satu and Tuisku, Outi and Hennala, Lea and Johansson-Pajala, Rose-Marie and Gustafsson, Christine and Thommes, Kirsten}},
  issn         = {{1044-7318}},
  journal      = {{International Journal of Human–Computer Interaction}},
  keywords     = {{Computer Science Applications, Human-Computer Interaction, Human Factors and Ergonomics}},
  pages        = {{1--17}},
  publisher    = {{Informa UK Limited}},
  title        = {{{Perception of Society’s Trust in Care Robots by Public Opinion Leaders}}},
  doi          = {{10.1080/10447318.2022.2081283}},
  year         = {{2022}},
}

@article{34046,
  author       = {{Hoffmann, Christin and Thommes, Kirsten}},
  issn         = {{2168-2291}},
  journal      = {{IEEE Transactions on Human-Machine Systems}},
  keywords     = {{Artificial Intelligence, Computer Networks and Communications, Computer Science Applications, Human-Computer Interaction, Signal Processing, Control and Systems Engineering, Human Factors and Ergonomics}},
  pages        = {{1--11}},
  publisher    = {{Institute of Electrical and Electronics Engineers (IEEE)}},
  title        = {{{Seizing the Opportunity for Automation—How Traffic Density Determines Truck Drivers' Use of Cruise Control}}},
  doi          = {{10.1109/thms.2022.3212335}},
  year         = {{2022}},
}

@article{30218,
  author       = {{Tuisku, Outi and Johansson-Pajala, Rose-Marie and Hoppe, Julia Amelie and Pekkarinen, Satu and Hennala, Lea and Thommes, Kirsten and Gustafsson, Christine and Melkas, Helinä}},
  issn         = {{0144-929X}},
  journal      = {{Behaviour & Information Technology}},
  keywords     = {{Human-Computer Interaction, General Social Sciences, Arts and Humanities (miscellaneous), Developmental and Educational Psychology}},
  pages        = {{1--17}},
  publisher    = {{Informa UK Limited}},
  title        = {{{Assistant nurses and orientation to care robot use in three European countries}}},
  doi          = {{10.1080/0144929x.2022.2042736}},
  year         = {{2022}},
}
