[{"year":"2025","quality_controlled":"1","title":"Manners Matter: Action history guides attention and repair choices during interaction","date_created":"2025-09-24T12:32:52Z","abstract":[{"text":"This study investigated how action histories – unfolding sequences of actions with objects – provide a context for both attentional allocation and linguistic repair strategies. Building on theories of enactive cognition and sensorimotor contingency theory, we experimentally manipulated action sequences (action history) to create either simple or rich “situational models,” and investigated how these models interact with attention and reflect in linguistic processes during human–robot interaction. Participants (N = 30) engaged in a controlled object placement task with a humanoid robot, where the action (manner) information was either provided or omitted. The omission elicited repair behaviors in participants that were in focus of our investigation. For rich models (competing action possibilities) participants demonstrated: a) increased attentional reorientation, reflecting active engagement with the situational model b) preference for restricted repairs, targeting the specific source of trouble in action selection. Conversely, a simple situational model led to more generalized attention patterns and open repair strategies, suggesting weaker constraints on internal processing. These findings highlight how situational structures emerge externally to scaffold internal cognitive processes, with action histories serving as a crucial context for the interface between perception, action, and language. We discuss how to implement such a tight loop in the assistance of a system.","lang":"eng"}],"publication":"IEEE International Conference on Development and Learning (ICDL)","keyword":["Attention","Action","Repairs","Task model","HRI","Eyemovement"],"language":[{"iso":"eng"}],"place":" Prague","citation":{"apa":"Singh, A., &#38; Rohlfing, K. J. (2025). 
Manners Matter: Action history guides attention and repair choices during interaction. <i>IEEE International Conference on Development and Learning (ICDL)</i>. IEEE International Conference on Development and Learning (ICDL), Prague. <a href=\"https://doi.org/10.31234/osf.io/yn2we_v1\">https://doi.org/10.31234/osf.io/yn2we_v1</a>","bibtex":"@inproceedings{Singh_Rohlfing_2025, place={ Prague}, title={Manners Matter: Action history guides attention and repair choices during interaction}, DOI={<a href=\"https://doi.org/10.31234/osf.io/yn2we_v1\">10.31234/osf.io/yn2we_v1</a>}, booktitle={IEEE International Conference on Development and Learning (ICDL)}, author={Singh, Amit and Rohlfing, Katharina J.}, year={2025} }","short":"A. Singh, K.J. Rohlfing, in: IEEE International Conference on Development and Learning (ICDL),  Prague, 2025.","mla":"Singh, Amit, and Katharina J. Rohlfing. “Manners Matter: Action History Guides Attention and Repair Choices during Interaction.” <i>IEEE International Conference on Development and Learning (ICDL)</i>, 2025, doi:<a href=\"https://doi.org/10.31234/osf.io/yn2we_v1\">10.31234/osf.io/yn2we_v1</a>.","chicago":"Singh, Amit, and Katharina J. Rohlfing. “Manners Matter: Action History Guides Attention and Repair Choices during Interaction.” In <i>IEEE International Conference on Development and Learning (ICDL)</i>.  Prague, 2025. <a href=\"https://doi.org/10.31234/osf.io/yn2we_v1\">https://doi.org/10.31234/osf.io/yn2we_v1</a>.","ieee":"A. Singh and K. J. Rohlfing, “Manners Matter: Action history guides attention and repair choices during interaction,” presented at the IEEE International Conference on Development and Learning (ICDL), Prague, 2025, doi: <a href=\"https://doi.org/10.31234/osf.io/yn2we_v1\">10.31234/osf.io/yn2we_v1</a>.","ama":"Singh A, Rohlfing KJ. Manners Matter: Action history guides attention and repair choices during interaction. In: <i>IEEE International Conference on Development and Learning (ICDL)</i>. ; 2025. 
doi:<a href=\"https://doi.org/10.31234/osf.io/yn2we_v1\">10.31234/osf.io/yn2we_v1</a>"},"publication_status":"published","conference":{"location":"Prague","end_date":"2025-09-19","start_date":"2025-09-15","name":"IEEE International Conference on Development and Learning (ICDL)"},"doi":"10.31234/osf.io/yn2we_v1","main_file_link":[{"open_access":"1","url":"https://doi.org/10.31234/osf.io/yn2we_v1"}],"date_updated":"2025-09-24T12:39:25Z","oa":"1","author":[{"id":"91018","full_name":"Singh, Amit","orcid":"0000-0002-7789-1521","last_name":"Singh","first_name":"Amit"},{"first_name":"Katharina J.","id":"50352","full_name":"Rohlfing, Katharina J.","last_name":"Rohlfing","orcid":"0000-0002-5676-8233"}],"status":"public","type":"conference","_id":"61432","project":[{"name":"TRR 318; TP A05: Echtzeitmessung der Aufmerksamkeit im Mensch-Roboter-Erklärdialog","_id":"115"}],"department":[{"_id":"749"},{"_id":"660"}],"user_id":"91018"},{"title":"Coupling of Task and Partner Model: Investigating the Intra-Individual Variability in Gaze during Human–Robot Explanatory Dialogue","doi":"10.1145/3686215.3689202","conference":{"name":"26th ACM International Conference on Multimodal Interaction (ICMI 2024)","location":"San Jose, Costa Rica"},"date_updated":"2024-11-06T10:56:34Z","date_created":"2024-10-17T09:35:32Z","author":[{"orcid":"0000-0002-7789-1521","last_name":"Singh","id":"91018","full_name":"Singh, Amit","first_name":"Amit"},{"last_name":"Rohlfing","full_name":"Rohlfing, Katharina J.","id":"50352","first_name":"Katharina J."}],"year":"2024","citation":{"ama":"Singh A, Rohlfing KJ. Coupling of Task and Partner Model: Investigating the Intra-Individual Variability in Gaze during Human–Robot Explanatory Dialogue. In: <i>Proceedings of 26th ACM International Conference on Multimodal Interaction (ICMI 2024)</i>. ; 2024. doi:<a href=\"https://doi.org/10.1145/3686215.3689202\">10.1145/3686215.3689202</a>","chicago":"Singh, Amit, and Katharina J. Rohlfing. 
“Coupling of Task and Partner Model: Investigating the Intra-Individual Variability in Gaze during Human–Robot Explanatory Dialogue.” In <i>Proceedings of 26th ACM International Conference on Multimodal Interaction (ICMI 2024)</i>, 2024. <a href=\"https://doi.org/10.1145/3686215.3689202\">https://doi.org/10.1145/3686215.3689202</a>.","ieee":"A. Singh and K. J. Rohlfing, “Coupling of Task and Partner Model: Investigating the Intra-Individual Variability in Gaze during Human–Robot Explanatory Dialogue,” presented at the 26th ACM International Conference on Multimodal Interaction (ICMI 2024), San Jose, Costa Rica, 2024, doi: <a href=\"https://doi.org/10.1145/3686215.3689202\">10.1145/3686215.3689202</a>.","apa":"Singh, A., &#38; Rohlfing, K. J. (2024). Coupling of Task and Partner Model: Investigating the Intra-Individual Variability in Gaze during Human–Robot Explanatory Dialogue. <i>Proceedings of 26th ACM International Conference on Multimodal Interaction (ICMI 2024)</i>. 26th ACM International Conference on Multimodal Interaction (ICMI 2024), San Jose, Costa Rica. <a href=\"https://doi.org/10.1145/3686215.3689202\">https://doi.org/10.1145/3686215.3689202</a>","short":"A. Singh, K.J. Rohlfing, in: Proceedings of 26th ACM International Conference on Multimodal Interaction (ICMI 2024), 2024.","bibtex":"@inproceedings{Singh_Rohlfing_2024, title={Coupling of Task and Partner Model: Investigating the Intra-Individual Variability in Gaze during Human–Robot Explanatory Dialogue}, DOI={<a href=\"https://doi.org/10.1145/3686215.3689202\">10.1145/3686215.3689202</a>}, booktitle={Proceedings of 26th ACM International Conference on Multimodal Interaction (ICMI 2024)}, author={Singh, Amit and Rohlfing, Katharina J.}, year={2024} }","mla":"Singh, Amit, and Katharina J. Rohlfing. 
“Coupling of Task and Partner Model: Investigating the Intra-Individual Variability in Gaze during Human–Robot Explanatory Dialogue.” <i>Proceedings of 26th ACM International Conference on Multimodal Interaction (ICMI 2024)</i>, 2024, doi:<a href=\"https://doi.org/10.1145/3686215.3689202\">10.1145/3686215.3689202</a>."},"has_accepted_license":"1","keyword":["Explanation","Scaffolding","Eyetracking","Partner Model","HRI"],"ddc":["410"],"language":[{"iso":"eng"}],"_id":"56660","project":[{"grant_number":"438445824","_id":"115","name":"TRR 318 - A05: TRR 318 - Echtzeitmessung der Aufmerksamkeit im Mensch-Roboter-Erklärdialog (Teilprojekt A05)"}],"department":[{"_id":"749"},{"_id":"660"}],"user_id":"91018","abstract":[{"lang":"eng","text":"In a successful dialogue in general and a successful explanation in specific, partners need to account for both, the task model (what is relevant for the task) and the partner model (what one can con- tribute). The phenomenon of coupling between task and the partner model becomes especially interesting in the context of Human– Robot Interaction where humans have to deal with unknown ca- pabilities of the robot, which can momentarily be perceived when the robot is unable to contribute to the task. Following research on the path over manner prominence in an action [31–33], a robot ex- plained actions to a human by emphasizing two aspects – the path (\"where\" component) and the manner (\"how\" component). On criti- cal trials, the robot occasionally omitted one of these components where participants sought missing information for the path or the manner. Participants’ information-seeking and gaze behaviour were analysed. 
Analysis confirms the initial predictions for, a) task model (path over manner prominence), i.e., earlier information-seeking for path-missing than manner-missing trials, and b) partner model, i.e., while information-seeking is predominantly tied to the attention on the robot’s face, when robot fails to provide resolution, attention shifts more often towards its torso – a behavior likely to indicate an exploration of the robot’s capabilities. An individual-level anal- ysis further confirms that the intra-individual variation in the task model is partly influenced by the perceived capability of the robot."}],"status":"public","publication":"Proceedings of 26th ACM International Conference on Multimodal Interaction (ICMI 2024)","type":"conference"},{"publication":"Cognitive Systems Research","abstract":[{"text":"Nowadays we deal with robots and AI more and more in our everyday life. However, their behavior is not always apparent to most lay users, especially in error situations. This can lead to misconceptions about the behavior of the technologies being used. This in turn can lead to misuse and rejection by users. Explanation, for example through transparency, can address these misconceptions. However, explaining the entire software or hardware would be confusing and overwhelming for users. Therefore, this paper focuses on the ‘enabling’ architecture. It describes those aspects of a robotic system that may need to be explained to enable someone to use the technology effectively. Furthermore, this paper deals with the ‘explanandum’, i.e. the corresponding misunderstandings or missing concepts of the enabling architecture that need to be clarified. 
Thus, we have developed and are presenting an approach to determine the ‘enabling’ architecture and the resulting ‘explanandum’ of complex technologies.","lang":"eng"}],"file":[{"content_type":"application/pdf","success":1,"relation":"main_file","date_updated":"2025-09-17T13:31:11Z","date_created":"2025-09-17T13:31:11Z","creator":"helebeen","file_size":1577897,"file_name":"mentaleModelle.pdf","access_level":"closed","file_id":"61330"}],"ddc":["006"],"keyword":["Robotics HRI Explainability Didactics Didactic reconstruction"],"language":[{"iso":"eng"}],"year":"2024","publisher":"Elsevier","date_created":"2024-07-26T08:01:23Z","title":"What you need to know about a learning robot: Identifying the enabling  architecture of complex systems","type":"journal_article","status":"public","project":[{"name":"TRR 318 - B5: TRR 318 - Subproject B5","_id":"123"}],"_id":"55394","user_id":"50995","article_type":"original","funded_apc":"1","file_date_updated":"2025-09-17T13:31:11Z","publication_status":"published","has_accepted_license":"1","citation":{"ama":"Beierling H, Richter P, Brandt M, et al. What you need to know about a learning robot: Identifying the enabling  architecture of complex systems. <i>Cognitive Systems Research</i>. 2024;88.","ieee":"H. Beierling <i>et al.</i>, “What you need to know about a learning robot: Identifying the enabling  architecture of complex systems,” <i>Cognitive Systems Research</i>, vol. 88, 2024.","chicago":"Beierling, Helen, Phillip Richter, Mara Brandt, Lutz Terfloth, Carsten Schulte, Heiko Wersing, and Anna-Lisa Vollmer. “What You Need to Know about a Learning Robot: Identifying the Enabling  Architecture of Complex Systems.” <i>Cognitive Systems Research</i> 88 (2024).","apa":"Beierling, H., Richter, P., Brandt, M., Terfloth, L., Schulte, C., Wersing, H., &#38; Vollmer, A.-L. (2024). What you need to know about a learning robot: Identifying the enabling  architecture of complex systems. 
<i>Cognitive Systems Research</i>, <i>88</i>.","bibtex":"@article{Beierling_Richter_Brandt_Terfloth_Schulte_Wersing_Vollmer_2024, title={What you need to know about a learning robot: Identifying the enabling  architecture of complex systems}, volume={88}, journal={Cognitive Systems Research}, publisher={Elsevier}, author={Beierling, Helen and Richter, Phillip and Brandt, Mara and Terfloth, Lutz and Schulte, Carsten and Wersing, Heiko and Vollmer, Anna-Lisa}, year={2024} }","mla":"Beierling, Helen, et al. “What You Need to Know about a Learning Robot: Identifying the Enabling  Architecture of Complex Systems.” <i>Cognitive Systems Research</i>, vol. 88, Elsevier, 2024.","short":"H. Beierling, P. Richter, M. Brandt, L. Terfloth, C. Schulte, H. Wersing, A.-L. Vollmer, Cognitive Systems Research 88 (2024)."},"intvolume":"        88","oa":"1","date_updated":"2025-09-17T13:32:31Z","author":[{"full_name":"Beierling, Helen","last_name":"Beierling","first_name":"Helen"},{"last_name":"Richter","full_name":"Richter, Phillip","first_name":"Phillip"},{"last_name":"Brandt","full_name":"Brandt, Mara","first_name":"Mara"},{"last_name":"Terfloth","full_name":"Terfloth, Lutz","first_name":"Lutz"},{"full_name":"Schulte, Carsten","last_name":"Schulte","first_name":"Carsten"},{"full_name":"Wersing, Heiko","last_name":"Wersing","first_name":"Heiko"},{"last_name":"Vollmer","full_name":"Vollmer, Anna-Lisa","first_name":"Anna-Lisa"}],"volume":88,"main_file_link":[{"open_access":"1"}]},{"publication_status":"published","citation":{"ama":"Groß A, Singh A, Banh NC, et al. Scaffolding the human partner by contrastive guidance in an explanatory human-robot dialogue. <i>Frontiers in Robotics and AI</i>. 2023;10. doi:<a href=\"https://doi.org/10.3389/frobt.2023.1236184\">10.3389/frobt.2023.1236184</a>","ieee":"A. Groß <i>et al.</i>, “Scaffolding the human partner by contrastive guidance in an explanatory human-robot dialogue,” <i>Frontiers in Robotics and AI</i>, vol. 
10, 2023, doi: <a href=\"https://doi.org/10.3389/frobt.2023.1236184\">10.3389/frobt.2023.1236184</a>.","chicago":"Groß, A., Amit Singh, Ngoc Chi Banh, B. Richter, Ingrid Scharlau, Katharina J. Rohlfing, and B. Wrede. “Scaffolding the Human Partner by Contrastive Guidance in an Explanatory Human-Robot Dialogue.” <i>Frontiers in Robotics and AI</i> 10 (2023). <a href=\"https://doi.org/10.3389/frobt.2023.1236184\">https://doi.org/10.3389/frobt.2023.1236184</a>.","short":"A. Groß, A. Singh, N.C. Banh, B. Richter, I. Scharlau, K.J. Rohlfing, B. Wrede, Frontiers in Robotics and AI 10 (2023).","bibtex":"@article{Groß_Singh_Banh_Richter_Scharlau_Rohlfing_Wrede_2023, title={Scaffolding the human partner by contrastive guidance in an explanatory human-robot dialogue}, volume={10}, DOI={<a href=\"https://doi.org/10.3389/frobt.2023.1236184\">10.3389/frobt.2023.1236184</a>}, journal={Frontiers in Robotics and AI}, author={Groß, A. and Singh, Amit and Banh, Ngoc Chi and Richter, B. and Scharlau, Ingrid and Rohlfing, Katharina J. and Wrede, B.}, year={2023} }","mla":"Groß, A., et al. “Scaffolding the Human Partner by Contrastive Guidance in an Explanatory Human-Robot Dialogue.” <i>Frontiers in Robotics and AI</i>, vol. 10, 2023, doi:<a href=\"https://doi.org/10.3389/frobt.2023.1236184\">10.3389/frobt.2023.1236184</a>.","apa":"Groß, A., Singh, A., Banh, N. C., Richter, B., Scharlau, I., Rohlfing, K. J., &#38; Wrede, B. (2023). Scaffolding the human partner by contrastive guidance in an explanatory human-robot dialogue. <i>Frontiers in Robotics and AI</i>, <i>10</i>. 
<a href=\"https://doi.org/10.3389/frobt.2023.1236184\">https://doi.org/10.3389/frobt.2023.1236184</a>"},"intvolume":"        10","oa":"1","date_updated":"2024-06-26T08:01:50Z","author":[{"first_name":"A.","last_name":"Groß","full_name":"Groß, A."},{"first_name":"Amit","full_name":"Singh, Amit","id":"91018","last_name":"Singh","orcid":"0000-0002-7789-1521"},{"orcid":"0000-0002-5946-4542","last_name":"Banh","full_name":"Banh, Ngoc Chi","id":"38219","first_name":"Ngoc Chi"},{"last_name":"Richter","full_name":"Richter, B.","first_name":"B."},{"first_name":"Ingrid","id":"451","full_name":"Scharlau, Ingrid","last_name":"Scharlau","orcid":"0000-0003-2364-9489"},{"last_name":"Rohlfing","full_name":"Rohlfing, Katharina J.","id":"50352","first_name":"Katharina J."},{"full_name":"Wrede, B.","last_name":"Wrede","first_name":"B."}],"volume":10,"main_file_link":[{"open_access":"1","url":"https://www.frontiersin.org/articles/10.3389/frobt.2023.1236184/full"}],"doi":"10.3389/frobt.2023.1236184","type":"journal_article","status":"public","project":[{"grant_number":"438445824","_id":"115","name":"TRR 318 - A05: TRR 318 - Echtzeitmessung der Aufmerksamkeit im Mensch-Roboter-Erklärdialog (Teilprojekt A05)"}],"_id":"48543","user_id":"38219","department":[{"_id":"749"}],"article_type":"original","funded_apc":"1","quality_controlled":"1","year":"2023","date_created":"2023-10-30T09:29:16Z","title":"Scaffolding the human partner by contrastive guidance in an explanatory human-robot dialogue","publication":"Frontiers in Robotics and AI","abstract":[{"text":"Explanation has been identified as an important capability for AI-based systems, but research on systematic strategies for achieving understanding in interaction with such systems is still sparse. Negation is a linguistic strategy that is often used in explanations. It creates a contrast space between the affirmed and the negated item that enriches explaining processes with additional contextual information. 
While negation in human speech has been shown to lead to higher processing costs and worse task performance in terms of recall or action execution when used in isolation, it can decrease processing costs when used in context. So far, it has not been considered as a guiding strategy for explanations in human-robot interaction. We conducted an empirical study to investigate the use of negation as a guiding strategy in explanatory human-robot dialogue, in which a virtual robot explains tasks and possible actions to a human explainee to solve them in terms of gestures on a touchscreen. Our results show that negation vs. affirmation 1) increases processing costs measured as reaction time and 2) increases several aspects of task performance. While there was no significant effect of negation on the number of initially correctly executed gestures, we found a significantly lower number of attempts—measured as breaks in the finger movement data before the correct gesture was carried out—when being instructed through a negation. We further found that the gestures significantly resembled the presented prototype gesture more following an instruction with a negation as opposed to an affirmation. Also, the participants rated the benefit of contrastive vs. affirmative explanations significantly higher. Repeating the instructions decreased the effects of negation, yielding similar processing costs and task performance measures for negation and affirmation after several iterations. 
We discuss our results with respect to possible effects of negation on linguistic processing of explanations and limitations of our study.","lang":"eng"}],"keyword":["HRI","XAI","negation","understanding","explaining","touch interaction","gesture"],"language":[{"iso":"eng"}]},{"type":"journal_article","publication":"International Journal of Advanced Robotic Systems","abstract":[{"lang":"eng","text":"Co-development of action, conceptualization and social interaction mutually scaffold and support each other within a virtuous feedback cycle in the development of human language in children. Within this framework, the purpose of this article is to bring together diverse but complementary accounts of research methods that jointly contribute to our understanding of cognitive development and in particular, language acquisition in robots. Thus, we include research pertaining to developmental robotics, cognitive science, psychology, linguistics and neuroscience, as well as practical computer science and engineering. The different studies are not at this stage all connected into a cohesive whole; rather, they are presented to illuminate the need for multiple different approaches that complement each other in the pursuit of understanding cognitive development in robots. Extensive experiments involving the humanoid robot iCub are reported, while human learning relevant to developmental robotics has also contributed useful results. Disparate approaches are brought together via common underlying design principles. Without claiming to model human language acquisition directly, we are nonetheless inspired by analogous development in humans and consequently, our investigations include the parallel co-development of action, conceptualization and social interaction. 
Though these different approaches need to ultimately be integrated into a coherent, unified body of knowledge, progress is currently also being made by pursuing individual methods."}],"status":"public","_id":"17182","user_id":"14931","department":[{"_id":"749"}],"keyword":["Robot Language","Human Robot Interaction","HRI","Developmental Robotics","Cognitive Bootstrapping","Statistical Learning"],"language":[{"iso":"eng"}],"publication_identifier":{"issn":["1729-8814"]},"issue":"3","year":"2016","citation":{"ama":"Lyon C, Nehaniv CL, Saunders J, et al. Embodied Language Learning and Cognitive Bootstrapping: Methods and Design Principles. <i>International Journal of Advanced Robotic Systems</i>. 2016;13(3). doi:<a href=\"https://doi.org/10.5772/63462\">10.5772/63462</a>","ieee":"C. Lyon <i>et al.</i>, “Embodied Language Learning and Cognitive Bootstrapping: Methods and Design Principles,” <i>International Journal of Advanced Robotic Systems</i>, vol. 13, no. 3, 2016, doi: <a href=\"https://doi.org/10.5772/63462\">10.5772/63462</a>.","chicago":"Lyon, Caroline, Chrystopher L. Nehaniv, Joe Saunders, Tony Belpaeme, Ambra Bisio, Kerstin Fischer, Frank Forster, et al. “Embodied Language Learning and Cognitive Bootstrapping: Methods and Design Principles.” <i>International Journal of Advanced Robotic Systems</i> 13, no. 3 (2016). <a href=\"https://doi.org/10.5772/63462\">https://doi.org/10.5772/63462</a>.","short":"C. Lyon, C.L. Nehaniv, J. Saunders, T. Belpaeme, A. Bisio, K. Fischer, F. Forster, H. Lehmann, G. Metta, V. Mohan, A. Morse, S. Nolfi, F. Nori, K. Rohlfing, A. Sciutti, J. Tani, E. Tuci, B. Wrede, A. Zeschel, A. Cangelosi, International Journal of Advanced Robotic Systems 13 (2016).","mla":"Lyon, Caroline, et al. “Embodied Language Learning and Cognitive Bootstrapping: Methods and Design Principles.” <i>International Journal of Advanced Robotic Systems</i>, vol. 13, no. 
3, Intech Europe, 2016, doi:<a href=\"https://doi.org/10.5772/63462\">10.5772/63462</a>.","bibtex":"@article{Lyon_Nehaniv_Saunders_Belpaeme_Bisio_Fischer_Forster_Lehmann_Metta_Mohan_et al._2016, title={Embodied Language Learning and Cognitive Bootstrapping: Methods and Design Principles}, volume={13}, DOI={<a href=\"https://doi.org/10.5772/63462\">10.5772/63462</a>}, number={3}, journal={International Journal of Advanced Robotic Systems}, publisher={Intech Europe}, author={Lyon, Caroline and Nehaniv, Chrystopher L. and Saunders, Joe and Belpaeme, Tony and Bisio, Ambra and Fischer, Kerstin and Forster, Frank and Lehmann, Hagen and Metta, Giorgio and Mohan, Vishwanathan and et al.}, year={2016} }","apa":"Lyon, C., Nehaniv, C. L., Saunders, J., Belpaeme, T., Bisio, A., Fischer, K., Forster, F., Lehmann, H., Metta, G., Mohan, V., Morse, A., Nolfi, S., Nori, F., Rohlfing, K., Sciutti, A., Tani, J., Tuci, E., Wrede, B., Zeschel, A., &#38; Cangelosi, A. (2016). Embodied Language Learning and Cognitive Bootstrapping: Methods and Design Principles. <i>International Journal of Advanced Robotic Systems</i>, <i>13</i>(3). 
<a href=\"https://doi.org/10.5772/63462\">https://doi.org/10.5772/63462</a>"},"intvolume":"        13","publisher":"Intech Europe","date_updated":"2023-02-01T16:04:48Z","date_created":"2020-06-24T13:00:58Z","author":[{"last_name":"Lyon","full_name":"Lyon, Caroline","first_name":"Caroline"},{"last_name":"Nehaniv","full_name":"Nehaniv, Chrystopher L.","first_name":"Chrystopher L."},{"first_name":"Joe","full_name":"Saunders, Joe","last_name":"Saunders"},{"full_name":"Belpaeme, Tony","last_name":"Belpaeme","first_name":"Tony"},{"first_name":"Ambra","last_name":"Bisio","full_name":"Bisio, Ambra"},{"first_name":"Kerstin","full_name":"Fischer, Kerstin","last_name":"Fischer"},{"first_name":"Frank","last_name":"Forster","full_name":"Forster, Frank"},{"first_name":"Hagen","last_name":"Lehmann","full_name":"Lehmann, Hagen"},{"last_name":"Metta","full_name":"Metta, Giorgio","first_name":"Giorgio"},{"full_name":"Mohan, Vishwanathan","last_name":"Mohan","first_name":"Vishwanathan"},{"first_name":"Anthony","full_name":"Morse, Anthony","last_name":"Morse"},{"last_name":"Nolfi","full_name":"Nolfi, Stefano","first_name":"Stefano"},{"last_name":"Nori","full_name":"Nori, Francesco","first_name":"Francesco"},{"last_name":"Rohlfing","full_name":"Rohlfing, Katharina","id":"50352","first_name":"Katharina"},{"full_name":"Sciutti, Alessandra","last_name":"Sciutti","first_name":"Alessandra"},{"full_name":"Tani, Jun","last_name":"Tani","first_name":"Jun"},{"first_name":"Elio","last_name":"Tuci","full_name":"Tuci, Elio"},{"first_name":"Britta","full_name":"Wrede, Britta","last_name":"Wrede"},{"first_name":"Arne","last_name":"Zeschel","full_name":"Zeschel, Arne"},{"first_name":"Angelo","full_name":"Cangelosi, Angelo","last_name":"Cangelosi"}],"volume":13,"title":"Embodied Language Learning and Cognitive Bootstrapping: Methods and Design 
Principles","doi":"10.5772/63462"},{"type":"journal_article","status":"public","_id":"17233","user_id":"14931","department":[{"_id":"749"}],"publication_identifier":{"issn":["1572-0381"]},"citation":{"apa":"Fischer, K., Foth, K., Rohlfing, K., &#38; Wrede, B. (2011). Mindful tutors: Linguistic choice and action demonstration in speech to infants and a simulated robot. <i>Interaction Studies</i>, <i>12</i>(1), 134–161. <a href=\"https://doi.org/10.1075/is.12.1.06fis\">https://doi.org/10.1075/is.12.1.06fis</a>","mla":"Fischer, Kerstin, et al. “Mindful Tutors: Linguistic Choice and Action Demonstration in Speech to Infants and a Simulated Robot.” <i>Interaction Studies</i>, vol. 12, no. 1, John Benjamins Publishing Company, 2011, pp. 134–61, doi:<a href=\"https://doi.org/10.1075/is.12.1.06fis\">10.1075/is.12.1.06fis</a>.","bibtex":"@article{Fischer_Foth_Rohlfing_Wrede_2011, title={Mindful tutors: Linguistic choice and action demonstration in speech to infants and a simulated robot}, volume={12}, DOI={<a href=\"https://doi.org/10.1075/is.12.1.06fis\">10.1075/is.12.1.06fis</a>}, number={1}, journal={Interaction Studies}, publisher={John Benjamins Publishing Company}, author={Fischer, Kerstin and Foth, Kilian and Rohlfing, Katharina and Wrede, Britta}, year={2011}, pages={134–161} }","short":"K. Fischer, K. Foth, K. Rohlfing, B. Wrede, Interaction Studies 12 (2011) 134–161.","ieee":"K. Fischer, K. Foth, K. Rohlfing, and B. Wrede, “Mindful tutors: Linguistic choice and action demonstration in speech to infants and a simulated robot,” <i>Interaction Studies</i>, vol. 12, no. 1, pp. 134–161, 2011, doi: <a href=\"https://doi.org/10.1075/is.12.1.06fis\">10.1075/is.12.1.06fis</a>.","chicago":"Fischer, Kerstin, Kilian Foth, Katharina Rohlfing, and Britta Wrede. “Mindful Tutors: Linguistic Choice and Action Demonstration in Speech to Infants and a Simulated Robot.” <i>Interaction Studies</i> 12, no. 1 (2011): 134–61. 
<a href=\"https://doi.org/10.1075/is.12.1.06fis\">https://doi.org/10.1075/is.12.1.06fis</a>.","ama":"Fischer K, Foth K, Rohlfing K, Wrede B. Mindful tutors: Linguistic choice and action demonstration in speech to infants and a simulated robot. <i>Interaction Studies</i>. 2011;12(1):134-161. doi:<a href=\"https://doi.org/10.1075/is.12.1.06fis\">10.1075/is.12.1.06fis</a>"},"page":"134-161","intvolume":"12","date_updated":"2023-02-01T12:56:04Z","author":[{"full_name":"Fischer, Kerstin","last_name":"Fischer","first_name":"Kerstin"},{"full_name":"Foth, Kilian","last_name":"Foth","first_name":"Kilian"},{"last_name":"Rohlfing","full_name":"Rohlfing, Katharina","id":"50352","first_name":"Katharina"},{"first_name":"Britta","full_name":"Wrede, Britta","last_name":"Wrede"}],"volume":12,"doi":"10.1075/is.12.1.06fis","publication":"Interaction Studies","abstract":[{"lang":"eng","text":"It has been proposed that the design of robots might benefit from interactions that are similar to caregiver–child interactions, which is tailored to children’s respective capacities to a high degree. However, so far little is known about how people adapt their tutoring behaviour to robots and whether robots can evoke input that is similar to child-directed interaction. The paper presents detailed analyses of speakers’ linguistic and non-linguistic behaviour, such as action demonstration, in two comparable situations: In one experiment, parents described and explained to their nonverbal infants the use of certain everyday objects; in the other experiment, participants tutored a simulated robot on the same objects. The results, which show considerable differences between the two situations on almost all measures, are discussed in the light of the computer-as-social-actor paradigm and the register hypothesis."}],"keyword":["human–robot interaction (HRI)","social communication","register theory","motionese","robotese","child-directed speech (CDS)","motherese","mindless transfer","computers-as-social-actors"],"language":[{"iso":"eng"}],"issue":"1","year":"2011","publisher":"John Benjamins Publishing Company","date_created":"2020-06-24T13:01:57Z","title":"Mindful tutors: Linguistic choice and action demonstration in speech to infants and a simulated robot"},{"language":[{"iso":"eng"}],"keyword":["Ostensive Signals","Contingency","Motionese","hri"],"user_id":"14931","department":[{"_id":"749"}],"_id":"17264","status":"public","abstract":[{"lang":"eng","text":"In developmental research, tutoring behavior has been identified as scaffolding infants’ learning processes. Infants seem sensitive to tutoring situations and they detect these by ostensive cues [4]. Some social signals such as eye-gaze, child-directed speech (Motherese), child-directed motion (Motionese), and contingency have been shown to serve as ostensive cues. The concept of contingency describes exchanges in which two agents interact with each other reciprocally. Csibra and Gergely argued that contingency is a characteristic ostensive stimulus of a tutoring situation [4]. In order for a robot to be treated similar to an infant, it has to both, be sensitive to the ostensive stimuli on the one hand and induce tutoring behavior by its feedback about its capabilities on the other hand. In this paper, we raise the question whether a robot can be treated similar to an infant in an interaction. We present results concerning the acceptance of a robotic agent in a social learning scenario, which we obtained via comparison to interactions with 8-11 months old infants and adults in equal conditions. We applied measurements for motion modifications (Motionese) and eye-gaze behavior. Our results reveal significant differences between Adult-Child Interaction (ACI), Adult-Adult Interaction (AAI) and Adult-Robot Interaction (ARI) suggesting that in ARI, robot-directed tutoring behavior is even more accentuated in terms of Motionese, but contingent responsivity is impaired. Our results confirm previous findings [14] concerning the differences between ACI, AAI, and ARI and constitute an important empirical basis for making use of ostensive stimuli as social signals for tutoring behavior in social robotics."}],"type":"conference","publication":"IEEE International Workshop on Social Signal Processing","doi":"10.1109/acii.2009.5349507","title":"Which ostensive stimuli can be used for a robot to detect and maintain tutoring situations?","author":[{"first_name":"Katrin Solveig","last_name":"Lohan","full_name":"Lohan, Katrin Solveig"},{"first_name":"Anna-Lisa","last_name":"Vollmer","full_name":"Vollmer, Anna-Lisa"},{"first_name":"Jannik","last_name":"Fritsch","full_name":"Fritsch, Jannik"},{"last_name":"Rohlfing","full_name":"Rohlfing, Katharina","id":"50352","first_name":"Katharina"},{"first_name":"Britta","last_name":"Wrede","full_name":"Wrede, Britta"}],"date_created":"2020-06-24T13:02:33Z","date_updated":"2023-02-01T13:04:03Z","publisher":"International Computer Science Institute","citation":{"apa":"Lohan, K. S., Vollmer, A.-L., Fritsch, J., Rohlfing, K., &#38; Wrede, B. (2009). Which ostensive stimuli can be used for a robot to detect and maintain tutoring situations? <i>IEEE International Workshop on Social Signal Processing</i>. <a href=\"https://doi.org/10.1109/acii.2009.5349507\">https://doi.org/10.1109/acii.2009.5349507</a>","mla":"Lohan, Katrin Solveig, et al. “Which Ostensive Stimuli Can Be Used for a Robot to Detect and Maintain Tutoring Situations?” <i>IEEE International Workshop on Social Signal Processing</i>, International Computer Science Institute, 2009, doi:<a href=\"https://doi.org/10.1109/acii.2009.5349507\">10.1109/acii.2009.5349507</a>.","short":"K.S. Lohan, A.-L. Vollmer, J. Fritsch, K. Rohlfing, B. Wrede, in: IEEE International Workshop on Social Signal Processing, International Computer Science Institute, 2009.","bibtex":"@inproceedings{Lohan_Vollmer_Fritsch_Rohlfing_Wrede_2009, title={Which ostensive stimuli can be used for a robot to detect and maintain tutoring situations?}, DOI={<a href=\"https://doi.org/10.1109/acii.2009.5349507\">10.1109/acii.2009.5349507</a>}, booktitle={IEEE International Workshop on Social Signal Processing}, publisher={International Computer Science Institute}, author={Lohan, Katrin Solveig and Vollmer, Anna-Lisa and Fritsch, Jannik and Rohlfing, Katharina and Wrede, Britta}, year={2009} }","ama":"Lohan KS, Vollmer A-L, Fritsch J, Rohlfing K, Wrede B. Which ostensive stimuli can be used for a robot to detect and maintain tutoring situations? In: <i>IEEE International Workshop on Social Signal Processing</i>. International Computer Science Institute; 2009. doi:<a href=\"https://doi.org/10.1109/acii.2009.5349507\">10.1109/acii.2009.5349507</a>","chicago":"Lohan, Katrin Solveig, Anna-Lisa Vollmer, Jannik Fritsch, Katharina Rohlfing, and Britta Wrede. “Which Ostensive Stimuli Can Be Used for a Robot to Detect and Maintain Tutoring Situations?” In <i>IEEE International Workshop on Social Signal Processing</i>. International Computer Science Institute, 2009. <a href=\"https://doi.org/10.1109/acii.2009.5349507\">https://doi.org/10.1109/acii.2009.5349507</a>.","ieee":"K. S. Lohan, A.-L. Vollmer, J. Fritsch, K. Rohlfing, and B. Wrede, “Which ostensive stimuli can be used for a robot to detect and maintain tutoring situations?,” 2009, doi: <a href=\"https://doi.org/10.1109/acii.2009.5349507\">10.1109/acii.2009.5349507</a>."},"year":"2009","publication_identifier":{"isbn":["9781424448005"]}}]
