@inbook{61112,
  author       = {{Rohlfing, Katharina J. and Vollmer, Anna-Lisa and Grimminger, Angela}},
  booktitle    = {{Social Explainable AI}},
  editor       = {{Rohlfing, Katharina J. and Främling, Kary and Thommes, Kirsten and Alpsancar, Suzana and Lim, Brian Y.}},
  publisher    = {{Springer}},
  title        = {{{Practices: How to establish an explaining practice}}},
  doi          = {{10.1007/978-981-96-5290-7_5}},
  year         = {{2026}},
}

@inbook{61325,
  author       = {{Vollmer, Anna-Lisa and Buhl, Heike M. and Alami, Rachid and Främling, Kary and Grimminger, Angela and Booshehri, Meisam and Ngonga Ngomo, Axel-Cyrille}},
  booktitle    = {{Social Explainable AI}},
  editor       = {{Rohlfing, Katharina J. and Främling, Kary and Thommes, Kirsten and Alpsancar, Suzana and Lim, Brian Y.}},
  pages        = {{39--53}},
  publisher    = {{Springer}},
  title        = {{{Components of an explanation for co-constructive sXAI}}},
  doi          = {{10.1007/978-981-96-5290-7_3}},
  year         = {{2026}},
}

@article{55400,
  abstract     = {{This study contributes to the evolving field of robot learning in interaction
with humans, examining the impact of diverse input modalities on learning
outcomes. It introduces the concept of "meta-modalities", which encapsulate
additional forms of feedback beyond the traditional preference and scalar
feedback mechanisms. Unlike prior research that focused on individual
meta-modalities, this work evaluates their combined effect on learning
outcomes. Through a study with human participants, we explore user preferences
for these modalities and their impact on robot learning performance. Our
findings reveal that while individual modalities are perceived differently,
their combination significantly improves learning behavior and usability. This
research not only provides valuable insights into the optimization of
human-robot interactive task learning but also opens new avenues for enhancing
the interactive freedom and scaffolding capabilities provided to users in such
settings.}},
  author       = {{Beierling, Helen and Beierling, Robin and Vollmer, Anna-Lisa}},
  journal      = {{Frontiers in Robotics and AI}},
  keywords     = {{human-robot interaction, human-in-the-loop learning, reinforcement learning, interactive robot learning, multi-modal feedback, learning from demonstration, preference-based learning, scaffolding in robot learning}},
  publisher    = {{Frontiers}},
  title        = {{{The power of combined modalities in interactive robot learning}}},
  volume       = {{12}},
  year         = {{2025}},
}

@article{61327,
  abstract     = {{Robot learning from humans has been proposed and researched for several decades as a means to enable robots to learn new skills or
adapt existing ones to new situations. Recent advances in artificial intelligence, including learning approaches like reinforcement
learning and architectures like transformers and foundation models, combined with access to massive datasets, have created attractive
opportunities to apply those data-hungry techniques to this problem. We argue that the focus on massive amounts of pre-collected
data, and the resulting learning paradigm, where humans demonstrate and robots learn in isolation, is overshadowing a specialized
area of work we term Human-Interactive Robot Learning (HIRL). This paradigm, wherein robots and humans interact during the
learning process, is at the intersection of multiple fields (artificial intelligence, robotics, human-computer interaction, design, and others)
and holds unique promise. Using HIRL, robots can achieve greater sample efficiency (as humans can provide task knowledge through
interaction), align with human preferences (as humans can guide the robot behavior towards their expectations), and explore more
meaningfully and safely (as humans can utilize domain knowledge to guide learning and prevent catastrophic failures). This can result
in robotic systems that can more quickly and easily adapt to new tasks in human environments. The objective of this paper is to
provide a broad and consistent overview of HIRL research and to guide researchers toward understanding the scope of HIRL, and
current open or underexplored challenges related to four themes, namely human, robot learning, interaction, and broader context.
The paper includes concrete use cases to illustrate the interaction between these challenges and inspire further research according to
broad recommendations and a call for action for the growing HIRL community.}},
  author       = {{Baraka, Kim and Idrees, Ifrah and Faulkner, Taylor Kessler and Biyik, Erdem and Booth, Serena and Chetouani, Mohamed and Grollman, Daniel H. and Saran, Akanksha and Senft, Emmanuel and Tulli, Silvia and Vollmer, Anna-Lisa and Andriella, Antonio and Beierling, Helen and Horter, Tiffany and Kober, Jens and Sheidlower, Isaac and Taylor, Matthew E. and van Waveren, Sanne and Xiao, Xuesu}},
  journal      = {{ACM Transactions on Human-Robot Interaction}},
  keywords     = {{Robot learning, Interactive learning systems, Human-robot interaction, Human-in-the-loop machine learning, Teaching and learning}},
  title        = {{{Human-Interactive Robot Learning: Definition, Challenges, and Recommendations}}},
  year         = {{2025}},
}

@article{61156,
  abstract     = {{Explainability has become an important topic in computer science and artificial intelligence, leading to a subfield called Explainable Artificial Intelligence (XAI). The goal of providing or seeking explanations is to achieve (better) ‘understanding’ on the part of the explainee. However, what it means to ‘understand’ is still not clearly defined, and the concept itself is rarely the subject of scientific investigation. This conceptual article aims to present a model of forms of understanding for XAI-explanations and beyond. From an interdisciplinary perspective bringing together computer science, linguistics, sociology, philosophy and psychology, a definition of understanding and its forms, assessment, and dynamics during the process of giving everyday explanations are explored. Two types of understanding are considered as possible outcomes of explanations, namely enabledness, ‘knowing how’ to do or decide something, and comprehension, ‘knowing that’ – both in different degrees (from shallow to deep). Explanations regularly start with shallow understanding in a specific domain and can lead to deep comprehension and enabledness of the explanandum, which we see as a prerequisite for human users to gain agency. In this process, the increase of comprehension and enabledness are highly interdependent. Against the background of this systematization, special challenges of understanding in XAI are discussed.}},
  author       = {{Buschmeier, Hendrik and Buhl, Heike M. and Kern, Friederike and Grimminger, Angela and Beierling, Helen and Fisher, Josephine Beryl and Groß, André and Horwath, Ilona and Klowait, Nils and Lazarov, Stefan Teodorov and Lenke, Michael and Lohmer, Vivien and Rohlfing, Katharina J. and Scharlau, Ingrid and Singh, Amit and Terfloth, Lutz and Vollmer, Anna-Lisa and Wang, Yu and Wilmes, Annedore and Wrede, Britta}},
  journal      = {{Cognitive Systems Research}},
  keywords     = {{understanding, explaining, explanations, explainable, AI, interdisciplinarity, comprehension, enabledness, agency}},
  title        = {{{Forms of Understanding for XAI-Explanations}}},
  doi          = {{10.1016/j.cogsys.2025.101419}},
  volume       = {{94}},
  year         = {{2025}},
}

@article{55394,
  abstract     = {{Nowadays we deal with robots and AI more and more in our everyday life. However, their behavior is not always apparent to most lay users, especially in error situations. This can lead to misconceptions about the behavior of the technologies being used. This in turn can lead to misuse and rejection by users. Explanation, for example through transparency, can address these misconceptions. However, explaining the entire software or hardware would be confusing and overwhelming for users. Therefore, this paper focuses on the ‘enabling’ architecture. It describes those aspects of a robotic system that may need to be explained to enable someone to use the technology effectively. Furthermore, this paper deals with the ‘explanandum’, i.e. the corresponding misunderstandings or missing concepts of the enabling architecture that need to be clarified. Thus, we have developed and are presenting an approach to determine the ‘enabling’ architecture and the resulting ‘explanandum’ of complex technologies.}},
  author       = {{Beierling, Helen and Richter, Phillip and Brandt, Mara and Terfloth, Lutz and Schulte, Carsten and Wersing, Heiko and Vollmer, Anna-Lisa}},
  journal      = {{Cognitive Systems Research}},
  keywords     = {{Robotics, HRI, Explainability, Didactics, Didactic reconstruction}},
  publisher    = {{Elsevier}},
  title        = {{{What you need to know about a learning robot: Identifying the enabling architecture of complex systems}}},
  volume       = {{88}},
  year         = {{2024}},
}

@misc{55398,
  abstract     = {{This position paper calls for an increase in the
number and flexibility of input mechanisms in user-robot
interactions, highlighting their potential to enhance learning
algorithms through user feedback. Moreover, we argue that refining
interfaces, interactions, and systems is crucial for the optimal
integration of mechanisms into learning processes. Our call to
research involves the development of interfaces that enable flexible
mechanisms, the mechanisms that interactions can benefit most from,
and the algorithmic incorporation of user input. This aims to
advance the adaptability and responsiveness of robotic systems in
human-centric environments.}},
  author       = {{Beierling, Helen and Loos, Kira and Helmert, Robin and Vollmer, Anna-Lisa}},
  publisher    = {{Proceedings of the Mechanisms for Mapping Human Input to Robots (Robot Learning and Shared Control/Autonomy) Workshop at RSS}},
  title        = {{{Advancing Human-Robot Collaboration: The Impact of Flexible Input Mechanisms}}},
  year         = {{2024}},
}

@inproceedings{51370,
  author       = {{Dyck, Leonie and Beierling, Helen and Helmert, Robin and Vollmer, Anna-Lisa}},
  booktitle    = {{Companion of the 2023 ACM/IEEE International Conference on Human-Robot Interaction}},
  location     = {{Stockholm, Sweden}},
  pages        = {{720--724}},
  publisher    = {{ACM}},
  title        = {{{Technical Transparency for Robot Navigation Through AR Visualizations}}},
  doi          = {{10.1145/3568294.3580181}},
  year         = {{2023}},
}

@unpublished{55396,
  abstract     = {{Explainability has become an important topic in computer science and
artificial intelligence, leading to a subfield called Explainable Artificial
Intelligence (XAI). The goal of providing or seeking explanations is to achieve
(better) 'understanding' on the part of the explainee. However, what it means
to 'understand' is still not clearly defined, and the concept itself is rarely
the subject of scientific investigation. This conceptual article aims to
present a model of forms of understanding in the context of XAI and beyond.
From an interdisciplinary perspective bringing together computer science,
linguistics, sociology, and psychology, a definition of understanding and its
forms, assessment, and dynamics during the process of giving everyday
explanations are explored. Two types of understanding are considered as
possible outcomes of explanations, namely enabledness, 'knowing how' to do or
decide something, and comprehension, 'knowing that' -- both in different
degrees (from shallow to deep). Explanations regularly start with shallow
understanding in a specific domain and can lead to deep comprehension and
enabledness of the explanandum, which we see as a prerequisite for human users
to gain agency. In this process, the increase of comprehension and enabledness
are highly interdependent. Against the background of this systematization,
special challenges of understanding in XAI are discussed.}},
  author       = {{Buschmeier, Hendrik and Buhl, Heike M. and Kern, Friederike and Grimminger, Angela and Beierling, Helen and Fisher, Josephine Beryl and Groß, André and Horwath, Ilona and Klowait, Nils and Lazarov, Stefan Teodorov and Lenke, Michael and Lohmer, Vivien and Rohlfing, Katharina J. and Scharlau, Ingrid and Singh, Amit and Terfloth, Lutz and Vollmer, Anna-Lisa and Wang, Yu and Wilmes, Annedore and Wrede, Britta}},
  note         = {{arXiv:2311.08760}},
  title        = {{{Forms of Understanding of XAI-Explanations}}},
  year         = {{2023}},
}

