@article{55394,
  abstract      = {Nowadays we deal with robots and AI more and more in our everyday life. However, their behavior is not always apparent to most lay users, especially in error situations. This can lead to misconceptions about the behavior of the technologies being used. This in turn can lead to misuse and rejection by users. Explanation, for example through transparency, can address these misconceptions. However, explaining the entire software or hardware would be confusing and overwhelming for users. Therefore, this paper focuses on the ‘enabling’ architecture. It describes those aspects of a robotic system that may need to be explained to enable someone to use the technology effectively. Furthermore, this paper deals with the ‘explanandum’, i.e. the corresponding misunderstandings or missing concepts of the enabling architecture that need to be clarified. Thus, we have developed and are presenting an approach to determine the ‘enabling’ architecture and the resulting ‘explanandum’ of complex technologies.},
  author        = {Beierling, Helen and Richter, Phillip and Brandt, Mara and Terfloth, Lutz and Schulte, Carsten and Wersing, Heiko and Vollmer, Anna-Lisa},
  journal       = {Cognitive Systems Research},
  keywords      = {Robotics, HRI, Explainability, Didactics, Didactic reconstruction},
  publisher     = {Elsevier},
  title         = {What You Need to Know about a Learning Robot: Identifying the Enabling Architecture of Complex Systems},
  volume        = {88},
  year          = {2024},
  internal-note = {review: DOI and pages/eid not in source record -- look up and add},
}

