[{"main_file_link":[{"open_access":"1","url":"https://pmc.ncbi.nlm.nih.gov/articles/PMC12312635/"}],"volume":12,"author":[{"first_name":"Helen","full_name":"Beierling, Helen","last_name":"Beierling"},{"first_name":"Robin ","last_name":"Beierling","full_name":"Beierling, Robin "},{"last_name":"Vollmer","full_name":"Vollmer, Anna-Lisa","first_name":"Anna-Lisa"}],"oa":"1","date_updated":"2025-09-17T13:38:18Z","intvolume":"        12","citation":{"ieee":"H. Beierling, R. Beierling, and A.-L. Vollmer, “The power of combined modalities in interactive robot learning,” <i>Frontiers in Robotics and AI</i>, vol. 12, 2025.","chicago":"Beierling, Helen, Robin  Beierling, and Anna-Lisa Vollmer. “The Power of Combined Modalities in Interactive Robot Learning.” <i>Frontiers in Robotics and AI</i> 12 (2025).","ama":"Beierling H, Beierling R, Vollmer A-L. The power of combined modalities in interactive robot learning. <i>Frontiers in Robotics and AI</i>. 2025;12.","mla":"Beierling, Helen, et al. “The Power of Combined Modalities in Interactive Robot Learning.” <i>Frontiers in Robotics and AI</i>, vol. 12, Frontiers , 2025.","bibtex":"@article{Beierling_Beierling_Vollmer_2025, title={The power of combined modalities in interactive robot learning}, volume={12}, journal={Frontiers in Robotics and AI}, publisher={Frontiers }, author={Beierling, Helen and Beierling, Robin  and Vollmer, Anna-Lisa}, year={2025} }","short":"H. Beierling, R. Beierling, A.-L. Vollmer, Frontiers in Robotics and AI 12 (2025).","apa":"Beierling, H., Beierling, R., &#38; Vollmer, A.-L. (2025). The power of combined modalities in interactive robot learning. 
<i>Frontiers in Robotics and AI</i>, <i>12</i>."},"has_accepted_license":"1","publication_status":"published","funded_apc":"1","file_date_updated":"2025-09-17T13:36:09Z","extern":"1","article_type":"original","user_id":"50995","_id":"55400","project":[{"_id":"123","name":"TRR 318 - B5: TRR 318 - Subproject B5"}],"status":"public","type":"journal_article","title":"The power of combined modalities in interactive robot learning","date_created":"2024-07-26T08:35:24Z","publisher":"Frontiers ","year":"2025","language":[{"iso":"eng"}],"keyword":["human-robot interaction","human-in-the-loop learning","reinforcement learning","interactive robot learning","multi-modal feedback","learning from demonstration","preference-based learning","scaffolding in robot learning"],"ddc":["004"],"file":[{"success":1,"relation":"main_file","content_type":"application/pdf","file_size":36978223,"file_name":"frobt-12-1598968.pdf","access_level":"closed","file_id":"61331","date_updated":"2025-09-17T13:36:09Z","creator":"helebeen","date_created":"2025-09-17T13:36:09Z"}],"abstract":[{"text":"This study contributes to the evolving field of robot learning in interaction\r\nwith humans, examining the impact of diverse input modalities on learning\r\noutcomes. It introduces the concept of \"meta-modalities\" which encapsulate\r\nadditional forms of feedback beyond the traditional preference and scalar\r\nfeedback mechanisms. Unlike prior research that focused on individual\r\nmeta-modalities, this work evaluates their combined effect on learning\r\noutcomes. Through a study with human participants, we explore user preferences\r\nfor these modalities and their impact on robot learning performance. Our\r\nfindings reveal that while individual modalities are perceived differently,\r\ntheir combination significantly improves learning behavior and usability. 
This\r\nresearch not only provides valuable insights into the optimization of\r\nhuman-robot interactive task learning but also opens new avenues for enhancing\r\nthe interactive freedom and scaffolding capabilities provided to users in such\r\nsettings.","lang":"eng"}],"publication":"Frontiers in Robotics and AI"},{"abstract":[{"lang":"eng","text":"Robot learning from humans has been proposed and researched for several decades as a means to enable robots to learn new skills or\r\nadapt existing ones to new situations. Recent advances in artificial intelligence, including learning approaches like reinforcement\r\nlearning and architectures like transformers and foundation models, combined with access to massive datasets, has created attractive\r\nopportunities to apply those data-hungry techniques to this problem. We argue that the focus on massive amounts of pre-collected\r\ndata, and the resulting learning paradigm, where humans demonstrate and robots learn in isolation, is overshadowing a specialized\r\narea of work we term Human-Interactive-Robot-Learning (HIRL). This paradigm, wherein robots and humans interact during the\r\nlearning process, is at the intersection of multiple fields (artificial intelligence, robotics, human-computer interaction, design and others)\r\nand holds unique promise. Using HIRL, robots can achieve greater sample efficiency (as humans can provide task knowledge through\r\ninteraction), align with human preferences (as humans can guide the robot behavior towards their expectations), and explore more\r\nmeaningfully and safely (as humans can utilize domain knowledge to guide learning and prevent catastrophic failures). This can result\r\nin robotic systems that can more quickly and easily adapt to new tasks in human environments. 
The objective of this paper is to\r\nprovide a broad and consistent overview of HIRL research and to guide researchers toward understanding the scope of HIRL, and\r\ncurrent open or underexplored challenges related to four themes — namely, human, robot learning, interaction, and broader context.\r\nThe paper includes concrete use cases to illustrate the interaction between these challenges and inspire further research according to\r\nbroad recommendations and a call for action for the growing HIRL community"}],"status":"public","type":"journal_article","publication":"Transactions on Human-Robot Interaction","article_type":"original","keyword":["Robot learning","Interactive learning systems","Human-robot interaction","Human-in-the-loop machine learning","Teaching and learning"],"language":[{"iso":"eng"}],"project":[{"name":"TRR 318 - Subproject B5","_id":"123"}],"_id":"61327","user_id":"50995","year":"2025","citation":{"ieee":"K. Baraka <i>et al.</i>, “Human-Interactive Robot Learning: Definition, Challenges, and Recommendations,” <i>Transactions on Human-Robot Interaction</i>.","chicago":"Baraka, Kim , Ifrah Idrees, Taylor Kessler Faulkner, Erdem Biyik, Serena Booth, Mohamed Chetouani, Daniel H. Grollman, et al. “Human-Interactive Robot Learning: Definition, Challenges, and Recommendations.” <i>Transactions on Human-Robot Interaction</i>, n.d.","ama":"Baraka K, Idrees I, Faulkner TK, et al. Human-Interactive Robot Learning: Definition, Challenges, and Recommendations. <i>Transactions on Human-Robot Interaction</i>.","apa":"Baraka, K., Idrees, I., Faulkner, T. K., Biyik, E., Booth, S., Chetouani, M., Grollman, D. H., Saran, A., Senft, E., Tulli, S., Vollmer, A.-L., Andriella, A., Beierling, H., Horter, T., Kober, J., Sheidlower, I., Taylor, M. E., van Waveren, S., &#38; Xiao, X. (n.d.). Human-Interactive Robot Learning: Definition, Challenges, and Recommendations. 
<i>Transactions on Human-Robot Interaction</i>.","bibtex":"@article{Baraka_Idrees_Faulkner_Biyik_Booth_Chetouani_Grollman_Saran_Senft_Tulli_et al., title={Human-Interactive Robot Learning: Definition, Challenges, and Recommendations}, journal={Transactions on Human-Robot Interaction}, author={Baraka, Kim  and Idrees, Ifrah and Faulkner, Taylor Kessler and Biyik, Erdem and Booth, Serena and Chetouani, Mohamed and Grollman, Daniel H. and Saran, Akanksha and Senft, Emmanuel and Tulli, Silvia and et al.} }","mla":"Baraka, Kim, et al. “Human-Interactive Robot Learning: Definition, Challenges, and Recommendations.” <i>Transactions on Human-Robot Interaction</i>.","short":"K. Baraka, I. Idrees, T.K. Faulkner, E. Biyik, S. Booth, M. Chetouani, D.H. Grollman, A. Saran, E. Senft, S. Tulli, A.-L. Vollmer, A. Andriella, H. Beierling, T. Horter, J. Kober, I. Sheidlower, M.E. Taylor, S. van Waveren, X. Xiao, Transactions on Human-Robot Interaction (n.d.)."},"publication_status":"submitted","title":"Human-Interactive Robot Learning: Definition, Challenges, and Recommendations","date_updated":"2025-09-17T13:40:16Z","author":[{"last_name":"Baraka","full_name":"Baraka, Kim ","first_name":"Kim "},{"first_name":"Ifrah","full_name":"Idrees, Ifrah","last_name":"Idrees"},{"first_name":"Taylor Kessler","last_name":"Faulkner","full_name":"Faulkner, Taylor Kessler"},{"first_name":"Erdem","last_name":"Biyik","full_name":"Biyik, Erdem"},{"first_name":"Serena","last_name":"Booth","full_name":"Booth, Serena"},{"full_name":"Chetouani, Mohamed","last_name":"Chetouani","first_name":"Mohamed"},{"first_name":"Daniel H.","full_name":"Grollman, Daniel H.","last_name":"Grollman"},{"last_name":"Saran","full_name":"Saran, Akanksha","first_name":"Akanksha"},{"full_name":"Senft, Emmanuel","last_name":"Senft","first_name":"Emmanuel"},{"full_name":"Tulli, Silvia","last_name":"Tulli","first_name":"Silvia"},{"last_name":"Vollmer","full_name":"Vollmer, 
Anna-Lisa","first_name":"Anna-Lisa"},{"first_name":"Antonio","last_name":"Andriella","full_name":"Andriella, Antonio"},{"last_name":"Beierling","full_name":"Beierling, Helen","first_name":"Helen"},{"full_name":"Horter, Tiffany","last_name":"Horter","first_name":"Tiffany"},{"first_name":"Jens","full_name":"Kober, Jens","last_name":"Kober"},{"first_name":"Isaac","full_name":"Sheidlower, Isaac","last_name":"Sheidlower"},{"first_name":"Matthew E.","full_name":"Taylor, Matthew E.","last_name":"Taylor"},{"last_name":"van Waveren","full_name":"van Waveren, Sanne","first_name":"Sanne"},{"last_name":"Xiao","full_name":"Xiao, Xuesu","first_name":"Xuesu"}],"date_created":"2025-09-17T12:42:45Z"},{"type":"journal_article","publication":"International Journal of Advanced Robotic Systems","abstract":[{"lang":"eng","text":"Co-development of action, conceptualization and social interaction mutually scaffold and support each other within a virtuous feedback cycle in the development of human language in children. Within this framework, the purpose of this article is to bring together diverse but complementary accounts of research methods that jointly contribute to our understanding of cognitive development and in particular, language acquisition in robots. Thus, we include research pertaining to developmental robotics, cognitive science, psychology, linguistics and neuroscience, as well as practical computer science and engineering. The different studies are not at this stage all connected into a cohesive whole; rather, they are presented to illuminate the need for multiple different approaches that complement each other in the pursuit of understanding cognitive development in robots. Extensive experiments involving the humanoid robot iCub are reported, while human learning relevant to developmental robotics has also contributed useful results. Disparate approaches are brought together via common underlying design principles. 
Without claiming to model human language acquisition directly, we are nonetheless inspired by analogous development in humans and consequently, our investigations include the parallel co-development of action, conceptualization and social interaction. Though these different approaches need to ultimately be integrated into a coherent, unified body of knowledge, progress is currently also being made by pursuing individual methods."}],"status":"public","_id":"17182","user_id":"14931","department":[{"_id":"749"}],"keyword":["Robot Language","Human Robot Interaction","HRI","Developmental Robotics","Cognitive Bootstrapping","Statistical Learning"],"language":[{"iso":"eng"}],"publication_identifier":{"issn":["1729-8814"]},"issue":"3","year":"2016","citation":{"ama":"Lyon C, Nehaniv CL, Saunders J, et al. Embodied Language Learning and Cognitive Bootstrapping: Methods and Design Principles. <i>International Journal of Advanced Robotic Systems</i>. 2016;13(3). doi:<a href=\"https://doi.org/10.5772/63462\">10.5772/63462</a>","ieee":"C. Lyon <i>et al.</i>, “Embodied Language Learning and Cognitive Bootstrapping: Methods and Design Principles,” <i>International Journal of Advanced Robotic Systems</i>, vol. 13, no. 3, 2016, doi: <a href=\"https://doi.org/10.5772/63462\">10.5772/63462</a>.","chicago":"Lyon, Caroline, Chrystopher L. Nehaniv, Joe Saunders, Tony Belpaeme, Ambra Bisio, Kerstin Fischer, Frank Forster, et al. “Embodied Language Learning and Cognitive Bootstrapping: Methods and Design Principles.” <i>International Journal of Advanced Robotic Systems</i> 13, no. 3 (2016). <a href=\"https://doi.org/10.5772/63462\">https://doi.org/10.5772/63462</a>.","apa":"Lyon, C., Nehaniv, C. L., Saunders, J., Belpaeme, T., Bisio, A., Fischer, K., Forster, F., Lehmann, H., Metta, G., Mohan, V., Morse, A., Nolfi, S., Nori, F., Rohlfing, K., Sciutti, A., Tani, J., Tuci, E., Wrede, B., Zeschel, A., &#38; Cangelosi, A. (2016). 
Embodied Language Learning and Cognitive Bootstrapping: Methods and Design Principles. <i>International Journal of Advanced Robotic Systems</i>, <i>13</i>(3). <a href=\"https://doi.org/10.5772/63462\">https://doi.org/10.5772/63462</a>","mla":"Lyon, Caroline, et al. “Embodied Language Learning and Cognitive Bootstrapping: Methods and Design Principles.” <i>International Journal of Advanced Robotic Systems</i>, vol. 13, no. 3, Intech Europe, 2016, doi:<a href=\"https://doi.org/10.5772/63462\">10.5772/63462</a>.","bibtex":"@article{Lyon_Nehaniv_Saunders_Belpaeme_Bisio_Fischer_Forster_Lehmann_Metta_Mohan_et al._2016, title={Embodied Language Learning and Cognitive Bootstrapping: Methods and Design Principles}, volume={13}, DOI={<a href=\"https://doi.org/10.5772/63462\">10.5772/63462</a>}, number={3}, journal={International Journal of Advanced Robotic Systems}, publisher={Intech Europe}, author={Lyon, Caroline and Nehaniv, Chrystopher L. and Saunders, Joe and Belpaeme, Tony and Bisio, Ambra and Fischer, Kerstin and Forster, Frank and Lehmann, Hagen and Metta, Giorgio and Mohan, Vishwanathan and et al.}, year={2016} }","short":"C. Lyon, C.L. Nehaniv, J. Saunders, T. Belpaeme, A. Bisio, K. Fischer, F. Forster, H. Lehmann, G. Metta, V. Mohan, A. Morse, S. Nolfi, F. Nori, K. Rohlfing, A. Sciutti, J. Tani, E. Tuci, B. Wrede, A. Zeschel, A. 
Cangelosi, International Journal of Advanced Robotic Systems 13 (2016)."},"intvolume":"        13","date_updated":"2023-02-01T16:04:48Z","publisher":"Intech Europe","date_created":"2020-06-24T13:00:58Z","author":[{"last_name":"Lyon","full_name":"Lyon, Caroline","first_name":"Caroline"},{"first_name":"Chrystopher L.","last_name":"Nehaniv","full_name":"Nehaniv, Chrystopher L."},{"first_name":"Joe","full_name":"Saunders, Joe","last_name":"Saunders"},{"first_name":"Tony","full_name":"Belpaeme, Tony","last_name":"Belpaeme"},{"first_name":"Ambra","full_name":"Bisio, Ambra","last_name":"Bisio"},{"first_name":"Kerstin","full_name":"Fischer, Kerstin","last_name":"Fischer"},{"full_name":"Forster, Frank","last_name":"Forster","first_name":"Frank"},{"first_name":"Hagen","full_name":"Lehmann, Hagen","last_name":"Lehmann"},{"full_name":"Metta, Giorgio","last_name":"Metta","first_name":"Giorgio"},{"first_name":"Vishwanathan","last_name":"Mohan","full_name":"Mohan, Vishwanathan"},{"first_name":"Anthony","full_name":"Morse, Anthony","last_name":"Morse"},{"full_name":"Nolfi, Stefano","last_name":"Nolfi","first_name":"Stefano"},{"first_name":"Francesco","last_name":"Nori","full_name":"Nori, Francesco"},{"first_name":"Katharina","last_name":"Rohlfing","full_name":"Rohlfing, Katharina","id":"50352"},{"first_name":"Alessandra","full_name":"Sciutti, Alessandra","last_name":"Sciutti"},{"last_name":"Tani","full_name":"Tani, Jun","first_name":"Jun"},{"full_name":"Tuci, Elio","last_name":"Tuci","first_name":"Elio"},{"last_name":"Wrede","full_name":"Wrede, Britta","first_name":"Britta"},{"last_name":"Zeschel","full_name":"Zeschel, Arne","first_name":"Arne"},{"first_name":"Angelo","last_name":"Cangelosi","full_name":"Cangelosi, Angelo"}],"volume":13,"title":"Embodied Language Learning and Cognitive Bootstrapping: Methods and Design Principles","doi":"10.5772/63462"},{"keyword":["learning","Human-robot interaction","Alignment","Robot social","Action 
understanding"],"language":[{"iso":"eng"}],"abstract":[{"lang":"eng","text":"Alignment is a phenomenon observed in human conversation: Dialog partners' behavior converges in many respects. Such alignment has been proposed to be automatic and the basis for communicating successfully. Recent research on human-computer dialog promotes a mediated communicative design account of alignment according to which the extent of alignment is influenced by interlocutors' beliefs about each other. Our work aims at adding to these findings in two ways. (a) Our work investigates alignment of manual actions, instead of lexical choice. (b) Participants interact with the iCub humanoid robot, instead of an artificial computer dialog system. Our results confirm that alignment also takes place in the domain of actions. We were not able to replicate the results of the original study in general in this setting, but in accordance with its findings, participants with a high questionnaire score for emotional stability and participants who are familiar with robots align their actions more to a robot they believe to be basic than to one they believe to be advanced. 
Regarding alignment over the course of an interaction, the extent of alignment seems to remain constant, when participants believe the robot to be advanced, but it increases over time, when participants believe the robot to be a basic version."}],"publication":"International Journal of Social Robotics","title":"Alignment to the Actions of a Robot","publisher":"Springer-Verlag","date_created":"2020-06-24T13:01:06Z","year":"2015","issue":"2","_id":"17189","department":[{"_id":"749"}],"user_id":"14931","status":"public","type":"journal_article","doi":"10.1007/s12369-014-0252-0","date_updated":"2023-02-01T16:07:40Z","volume":7,"author":[{"first_name":"Anna-Lisa","last_name":"Vollmer","full_name":"Vollmer, Anna-Lisa"},{"id":"50352","full_name":"Rohlfing, Katharina","last_name":"Rohlfing","first_name":"Katharina"},{"first_name":"Britta","full_name":"Wrede, Britta","last_name":"Wrede"},{"first_name":"Angelo","last_name":"Cangelosi","full_name":"Cangelosi, Angelo"}],"page":"241-252","intvolume":"         7","citation":{"ama":"Vollmer A-L, Rohlfing K, Wrede B, Cangelosi A. Alignment to the Actions of a Robot. <i>International Journal of Social Robotics</i>. 2015;7(2):241-252. doi:<a href=\"https://doi.org/10.1007/s12369-014-0252-0\">10.1007/s12369-014-0252-0</a>","chicago":"Vollmer, Anna-Lisa, Katharina Rohlfing, Britta Wrede, and Angelo Cangelosi. “Alignment to the Actions of a Robot.” <i>International Journal of Social Robotics</i> 7, no. 2 (2015): 241–52. <a href=\"https://doi.org/10.1007/s12369-014-0252-0\">https://doi.org/10.1007/s12369-014-0252-0</a>.","ieee":"A.-L. Vollmer, K. Rohlfing, B. Wrede, and A. Cangelosi, “Alignment to the Actions of a Robot,” <i>International Journal of Social Robotics</i>, vol. 7, no. 2, pp. 241–252, 2015, doi: <a href=\"https://doi.org/10.1007/s12369-014-0252-0\">10.1007/s12369-014-0252-0</a>.","mla":"Vollmer, Anna-Lisa, et al. “Alignment to the Actions of a Robot.” <i>International Journal of Social Robotics</i>, vol. 7, no. 
2, Springer-Verlag, 2015, pp. 241–52, doi:<a href=\"https://doi.org/10.1007/s12369-014-0252-0\">10.1007/s12369-014-0252-0</a>.","bibtex":"@article{Vollmer_Rohlfing_Wrede_Cangelosi_2015, title={Alignment to the Actions of a Robot}, volume={7}, DOI={<a href=\"https://doi.org/10.1007/s12369-014-0252-0\">10.1007/s12369-014-0252-0</a>}, number={2}, journal={International Journal of Social Robotics}, publisher={Springer-Verlag}, author={Vollmer, Anna-Lisa and Rohlfing, Katharina and Wrede, Britta and Cangelosi, Angelo}, year={2015}, pages={241–252} }","short":"A.-L. Vollmer, K. Rohlfing, B. Wrede, A. Cangelosi, International Journal of Social Robotics 7 (2015) 241–252.","apa":"Vollmer, A.-L., Rohlfing, K., Wrede, B., &#38; Cangelosi, A. (2015). Alignment to the Actions of a Robot. <i>International Journal of Social Robotics</i>, <i>7</i>(2), 241–252. <a href=\"https://doi.org/10.1007/s12369-014-0252-0\">https://doi.org/10.1007/s12369-014-0252-0</a>"},"publication_identifier":{"issn":["1875-4791"]}},{"title":"Co-development of manner and path concepts in language, action, and eye-gaze behavior","doi":"10.1111/tops.12098","date_updated":"2023-02-01T16:08:33Z","publisher":"Wiley-Blackwell","volume":6,"author":[{"first_name":"Katrin S.","last_name":"Lohan","full_name":"Lohan, Katrin S."},{"first_name":"Sascha","full_name":"Griffiths, Sascha","last_name":"Griffiths"},{"full_name":"Sciutti, Alessandra","last_name":"Sciutti","first_name":"Alessandra"},{"last_name":"Partmann","full_name":"Partmann, Tim C.","first_name":"Tim C."},{"first_name":"Katharina","last_name":"Rohlfing","full_name":"Rohlfing, Katharina","id":"50352"}],"date_created":"2020-06-24T13:01:09Z","year":"2014","page":"492-512","intvolume":"         6","citation":{"chicago":"Lohan, Katrin S., Sascha Griffiths, Alessandra Sciutti, Tim C. Partmann, and Katharina Rohlfing. “Co-Development of Manner and Path Concepts in Language, Action, and Eye-Gaze Behavior.” <i>Topics in Cognitive Science</i> 6, no. 
3 (2014): 492–512. <a href=\"https://doi.org/10.1111/tops.12098\">https://doi.org/10.1111/tops.12098</a>.","ieee":"K. S. Lohan, S. Griffiths, A. Sciutti, T. C. Partmann, and K. Rohlfing, “Co-development of manner and path concepts in language, action, and eye-gaze behavior,” <i>Topics in Cognitive Science</i>, vol. 6, no. 3, pp. 492–512, 2014, doi: <a href=\"https://doi.org/10.1111/tops.12098\">10.1111/tops.12098</a>.","ama":"Lohan KS, Griffiths S, Sciutti A, Partmann TC, Rohlfing K. Co-development of manner and path concepts in language, action, and eye-gaze behavior. <i>Topics in Cognitive Science</i>. 2014;6(3):492-512. doi:<a href=\"https://doi.org/10.1111/tops.12098\">10.1111/tops.12098</a>","bibtex":"@article{Lohan_Griffiths_Sciutti_Partmann_Rohlfing_2014, title={Co-development of manner and path concepts in language, action, and eye-gaze behavior}, volume={6}, DOI={<a href=\"https://doi.org/10.1111/tops.12098\">10.1111/tops.12098</a>}, number={3}, journal={Topics in Cognitive Science}, publisher={Wiley-Blackwell}, author={Lohan, Katrin S. and Griffiths, Sascha and Sciutti, Alessandra and Partmann, Tim C. and Rohlfing, Katharina}, year={2014}, pages={492–512} }","short":"K.S. Lohan, S. Griffiths, A. Sciutti, T.C. Partmann, K. Rohlfing, Topics in Cognitive Science 6 (2014) 492–512.","mla":"Lohan, Katrin S., et al. “Co-Development of Manner and Path Concepts in Language, Action, and Eye-Gaze Behavior.” <i>Topics in Cognitive Science</i>, vol. 6, no. 3, Wiley-Blackwell, 2014, pp. 492–512, doi:<a href=\"https://doi.org/10.1111/tops.12098\">10.1111/tops.12098</a>.","apa":"Lohan, K. S., Griffiths, S., Sciutti, A., Partmann, T. C., &#38; Rohlfing, K. (2014). Co-development of manner and path concepts in language, action, and eye-gaze behavior. <i>Topics in Cognitive Science</i>, <i>6</i>(3), 492–512. 
<a href=\"https://doi.org/10.1111/tops.12098\">https://doi.org/10.1111/tops.12098</a>"},"publication_identifier":{"issn":["1756-8757"]},"issue":"3","keyword":["Imitation","Tutoring","Adult-child interaction","Human-robot interaction","Semantics","Teachable robots"],"language":[{"iso":"eng"}],"_id":"17192","department":[{"_id":"749"}],"user_id":"14931","abstract":[{"lang":"eng","text":"In order for artificial intelligent systems to interact naturally with human users, they need to be able to learn from human instructions when actions should be imitated. Human tutoring will typically consist of action demonstrations accompanied by speech. In the following, the characteristics of human tutoring during action demonstration will be examined. A special focus will be put on the distinction between two kinds of motion events: path-oriented actions and manner-oriented actions. Such a distinction is inspired by the literature pertaining to cognitive linguistics, which indicates that the human conceptual system can distinguish these two distinct types of motion. These two kinds of actions are described in language by more path-oriented or more manner-oriented utterances. In path-oriented utterances, the source, trajectory, or goal is emphasized, whereas in manner-oriented utterances the medium, velocity, or means of motion are highlighted. We examined a video corpus of adult-child interactions comprised of three age groups of children-pre-lexical, early lexical, and lexical-and two different tasks, one emphasizing manner more strongly and one emphasizing path more strongly. We analyzed the language and motion of the caregiver and the gazing behavior of the child to highlight the differences between the tutoring and the acquisition of the manner and path concepts. The results suggest that age is an important factor in the development of these action categories. 
The analysis of this corpus has also been exploited to develop an intelligent robotic behavior -the tutoring spotter system-able to emulate children's behaviors in a tutoring situation, with the aim of evoking in human subjects a natural and effective behavior in teaching to a robot. The findings related to the development of manner and path concepts have been used to implement new effective feedback strategies in the tutoring spotter system, which should provide improvements in human-robot interaction."}],"status":"public","publication":"Topics in Cognitive Science","type":"journal_article"},{"issue":"2","publication_identifier":{"issn":["1875-4805"]},"page":"201-217","intvolume":"         4","citation":{"apa":"Salem, M., Kopp, S., Wachsmuth, I., Rohlfing, K., &#38; Joublin, F. (2012). Generation and evaluation of communicative robot gesture. <i>International Journal of Social Robotics, Special Issue on Expectations, Intentions, and Actions</i>, <i>4</i>(2), 201–217. <a href=\"https://doi.org/10.1007/s12369-011-0124-9\">https://doi.org/10.1007/s12369-011-0124-9</a>","mla":"Salem, Maha, et al. “Generation and Evaluation of Communicative Robot Gesture.” <i>International Journal of Social Robotics, Special Issue on Expectations, Intentions, and Actions</i>, vol. 4, no. 2, Springer Science + Business Media, 2012, pp. 201–17, doi:<a href=\"https://doi.org/10.1007/s12369-011-0124-9\">10.1007/s12369-011-0124-9</a>.","bibtex":"@article{Salem_Kopp_Wachsmuth_Rohlfing_Joublin_2012, title={Generation and evaluation of communicative robot gesture}, volume={4}, DOI={<a href=\"https://doi.org/10.1007/s12369-011-0124-9\">10.1007/s12369-011-0124-9</a>}, number={2}, journal={International Journal of Social Robotics, Special Issue on Expectations, Intentions, and Actions}, publisher={Springer Science + Business Media}, author={Salem, Maha and Kopp, Stefan and Wachsmuth, Ipke and Rohlfing, Katharina and Joublin, Frank}, year={2012}, pages={201–217} }","short":"M. Salem, S. Kopp, I. 
Wachsmuth, K. Rohlfing, F. Joublin, International Journal of Social Robotics, Special Issue on Expectations, Intentions, and Actions 4 (2012) 201–217.","chicago":"Salem, Maha, Stefan Kopp, Ipke Wachsmuth, Katharina Rohlfing, and Frank Joublin. “Generation and Evaluation of Communicative Robot Gesture.” <i>International Journal of Social Robotics, Special Issue on Expectations, Intentions, and Actions</i> 4, no. 2 (2012): 201–17. <a href=\"https://doi.org/10.1007/s12369-011-0124-9\">https://doi.org/10.1007/s12369-011-0124-9</a>.","ieee":"M. Salem, S. Kopp, I. Wachsmuth, K. Rohlfing, and F. Joublin, “Generation and evaluation of communicative robot gesture,” <i>International Journal of Social Robotics, Special Issue on Expectations, Intentions, and Actions</i>, vol. 4, no. 2, pp. 201–217, 2012, doi: <a href=\"https://doi.org/10.1007/s12369-011-0124-9\">10.1007/s12369-011-0124-9</a>.","ama":"Salem M, Kopp S, Wachsmuth I, Rohlfing K, Joublin F. Generation and evaluation of communicative robot gesture. <i>International Journal of Social Robotics, Special Issue on Expectations, Intentions, and Actions</i>. 2012;4(2):201-217. 
doi:<a href=\"https://doi.org/10.1007/s12369-011-0124-9\">10.1007/s12369-011-0124-9</a>"},"year":"2012","volume":4,"author":[{"full_name":"Salem, Maha","last_name":"Salem","first_name":"Maha"},{"last_name":"Kopp","full_name":"Kopp, Stefan","first_name":"Stefan"},{"full_name":"Wachsmuth, Ipke","last_name":"Wachsmuth","first_name":"Ipke"},{"first_name":"Katharina","last_name":"Rohlfing","id":"50352","full_name":"Rohlfing, Katharina"},{"last_name":"Joublin","full_name":"Joublin, Frank","first_name":"Frank"}],"date_created":"2020-06-24T13:01:48Z","date_updated":"2023-02-01T16:21:50Z","publisher":"Springer Science + Business Media","doi":"10.1007/s12369-011-0124-9","title":"Generation and evaluation of communicative robot gesture","publication":"International Journal of Social Robotics, Special Issue on Expectations, Intentions, and Actions","type":"journal_article","status":"public","abstract":[{"lang":"eng","text":"How is communicative gesture behavior in robots perceived by humans? Although gesture is crucial in social interaction, this research question is still largely unexplored in the field of social robotics. Thus, the main objective of the present work is to investigate how gestural machine behaviors can be used to design more natural communication in social robots. The chosen approach is twofold. Firstly, the technical challenges encountered when implementing a speech-gesture generation model on a robotic platform are tackled. We present a framework that enables the humanoid robot to flexibly produce synthetic speech and co-verbal hand and arm gestures at run-time, while not being limited to a predefined repertoire of motor actions. Secondly, the achieved flexibility in robot gesture is exploited in controlled experiments. 
To gain a deeper understanding of how communicative robot gesture might impact and shape human perception and evaluation of human-robot interaction, we conducted a between-subjects experimental study using the humanoid robot in a joint task scenario. We manipulated the non-verbal behaviors of the robot in three experimental conditions, so that it would refer to objects by utilizing either (1) unimodal (i.e., speech only) utterances, (2) congruent multimodal (i.e., semantically matching speech and gesture) or (3) incongruent multimodal (i.e., semantically non-matching speech and gesture) utterances. Our findings reveal that the robot is evaluated more positively when non-verbal behaviors such as hand and arm gestures are displayed along with speech, even if they do not semantically match the spoken utterance."}],"department":[{"_id":"749"}],"user_id":"14931","_id":"17225","language":[{"iso":"eng"}],"keyword":["Social Human-Robot Interaction","Multimodal Interaction and Conversational Skills","Robot Companions and Social Robots","Non-verbal Cues and Expressiveness"]},{"year":"2012","issue":"2","title":"Generation and evaluation of communicative robot gesture","date_created":"2020-07-28T11:44:02Z","publisher":"Springer Science + Business Media","abstract":[{"lang":"eng","text":"How is communicative gesture behavior in robots perceived by humans? Although gesture is crucial in social interaction, this research question is still largely unexplored in the field of social robotics. Thus, the main objective of the present work is to investigate how gestural machine behaviors can be used to design more natural communication in social robots. The chosen approach is twofold. Firstly, the technical challenges encountered when implementing a speech-gesture generation model on a robotic platform are tackled. 
We present a framework that enables the humanoid robot to flexibly produce synthetic speech and co-verbal hand and arm gestures at run-time, while not being limited to a predefined repertoire of motor actions. Secondly, the achieved flexibility in robot gesture is exploited in controlled experiments. To gain a deeper understanding of how communicative robot gesture might impact and shape human perception and evaluation of human-robot interaction, we conducted a between-subjects experimental study using the humanoid robot in a joint task scenario. We manipulated the non-verbal behaviors of the robot in three experimental conditions, so that it would refer to objects by utilizing either (1) unimodal (i.e., speech only) utterances, (2) congruent multimodal (i.e., semantically matching speech and gesture) or (3) incongruent multimodal (i.e., semantically non-matching speech and gesture) utterances. Our findings reveal that the robot is evaluated more positively when non-verbal behaviors such as hand and arm gestures are displayed along with speech, even if they do not semantically match the spoken utterance."}],"publication":"International Journal of Social Robotics, Special Issue on Expectations, Intentions, and Actions","language":[{"iso":"eng"}],"keyword":["Social Human-Robot Interaction","Multimodal Interaction and Conversational Skills","Robot Companions and Social Robots","Non-verbal Cues and Expressiveness"],"intvolume":"         4","page":"201-217","citation":{"ama":"Salem M, Kopp S, Wachsmuth I, Rohlfing K, Joublin F. Generation and evaluation of communicative robot gesture. <i>International Journal of Social Robotics, Special Issue on Expectations, Intentions, and Actions</i>. 2012;4(2):201-217. doi:<a href=\"https://doi.org/10.1007/s12369-011-0124-9\">10.1007/s12369-011-0124-9</a>","chicago":"Salem, Maha, Stefan Kopp, Ipke Wachsmuth, Katharina Rohlfing, and Frank Joublin. 
“Generation and Evaluation of Communicative Robot Gesture.” <i>International Journal of Social Robotics, Special Issue on Expectations, Intentions, and Actions</i> 4, no. 2 (2012): 201–17. <a href=\"https://doi.org/10.1007/s12369-011-0124-9\">https://doi.org/10.1007/s12369-011-0124-9</a>.","ieee":"M. Salem, S. Kopp, I. Wachsmuth, K. Rohlfing, and F. Joublin, “Generation and evaluation of communicative robot gesture,” <i>International Journal of Social Robotics, Special Issue on Expectations, Intentions, and Actions</i>, vol. 4, no. 2, pp. 201–217, 2012, doi: <a href=\"https://doi.org/10.1007/s12369-011-0124-9\">10.1007/s12369-011-0124-9</a>.","apa":"Salem, M., Kopp, S., Wachsmuth, I., Rohlfing, K., &#38; Joublin, F. (2012). Generation and evaluation of communicative robot gesture. <i>International Journal of Social Robotics, Special Issue on Expectations, Intentions, and Actions</i>, <i>4</i>(2), 201–217. <a href=\"https://doi.org/10.1007/s12369-011-0124-9\">https://doi.org/10.1007/s12369-011-0124-9</a>","mla":"Salem, Maha, et al. “Generation and Evaluation of Communicative Robot Gesture.” <i>International Journal of Social Robotics, Special Issue on Expectations, Intentions, and Actions</i>, vol. 4, no. 2, Springer Science + Business Media, 2012, pp. 201–17, doi:<a href=\"https://doi.org/10.1007/s12369-011-0124-9\">10.1007/s12369-011-0124-9</a>.","short":"M. Salem, S. Kopp, I. Wachsmuth, K. Rohlfing, F. 
Joublin, International Journal of Social Robotics, Special Issue on Expectations, Intentions, and Actions 4 (2012) 201–217.","bibtex":"@article{Salem_Kopp_Wachsmuth_Rohlfing_Joublin_2012, title={Generation and evaluation of communicative robot gesture}, volume={4}, DOI={<a href=\"https://doi.org/10.1007/s12369-011-0124-9\">10.1007/s12369-011-0124-9</a>}, number={2}, journal={International Journal of Social Robotics, Special Issue on Expectations, Intentions, and Actions}, publisher={Springer Science + Business Media}, author={Salem, Maha and Kopp, Stefan and Wachsmuth, Ipke and Rohlfing, Katharina and Joublin, Frank}, year={2012}, pages={201–217} }"},"publication_identifier":{"issn":["1875-4805"]},"doi":"10.1007/s12369-011-0124-9","volume":4,"author":[{"first_name":"Maha","full_name":"Salem, Maha","last_name":"Salem"},{"last_name":"Kopp","full_name":"Kopp, Stefan","first_name":"Stefan"},{"last_name":"Wachsmuth","full_name":"Wachsmuth, Ipke","first_name":"Ipke"},{"full_name":"Rohlfing, Katharina","id":"50352","last_name":"Rohlfing","first_name":"Katharina"},{"last_name":"Joublin","full_name":"Joublin, Frank","first_name":"Frank"}],"date_updated":"2023-02-01T12:52:23Z","status":"public","type":"journal_article","department":[{"_id":"749"}],"user_id":"14931","_id":"17428"},{"issue":"1","year":"2011","date_created":"2020-06-24T13:01:57Z","publisher":"John Benjamins Publishing Company","title":"Mindful tutors: Linguistic choice and action demonstration in speech to infants and a simulated robot","publication":"Interaction Studies","abstract":[{"lang":"eng","text":"It has been proposed that the design of robots might benefit from interactions that are similar to caregiver–child interactions, which is tailored to children’s respective capacities to a high degree. However, so far little is known about how people adapt their tutoring behaviour to robots and whether robots can evoke input that is similar to child-directed interaction. 
The paper presents detailed analyses of speakers’ linguistic and non-linguistic behaviour, such as action demonstration, in two comparable situations: In one experiment, parents described and explained to their nonverbal infants the use of certain everyday objects; in the other experiment, participants tutored a simulated robot on the same objects. The results, which show considerable differences between the two situations on almost all measures, are discussed in the light of the computer-as-social-actor paradigm and the register hypothesis."}],"language":[{"iso":"eng"}],"keyword":["human–robot interaction (HRI)","social communication","register theory","motionese","robotese","child-directed speech (CDS)","motherese","mindless transfer","computers-as-social-actors"],"publication_identifier":{"issn":["1572-0381"]},"intvolume":"        12","page":"134-161","citation":{"ama":"Fischer K, Foth K, Rohlfing K, Wrede B. Mindful tutors: Linguistic choice and action demonstration in speech to infants and a simulated robot. <i>Interaction Studies</i>. 2011;12(1):134-161. doi:<a href=\"https://doi.org/10.1075/is.12.1.06fis\">10.1075/is.12.1.06fis</a>","ieee":"K. Fischer, K. Foth, K. Rohlfing, and B. Wrede, “Mindful tutors: Linguistic choice and action demonstration in speech to infants and a simulated robot,” <i>Interaction Studies</i>, vol. 12, no. 1, pp. 134–161, 2011, doi: <a href=\"https://doi.org/10.1075/is.12.1.06fis\">10.1075/is.12.1.06fis</a>.","chicago":"Fischer, Kerstin, Kilian Foth, Katharina Rohlfing, and Britta Wrede. “Mindful Tutors: Linguistic Choice and Action Demonstration in Speech to Infants and a Simulated Robot.” <i>Interaction Studies</i> 12, no. 1 (2011): 134–61. 
<a href=\"https://doi.org/10.1075/is.12.1.06fis\">https://doi.org/10.1075/is.12.1.06fis</a>.","bibtex":"@article{Fischer_Foth_Rohlfing_Wrede_2011, title={Mindful tutors: Linguistic choice and action demonstration in speech to infants and a simulated robot}, volume={12}, DOI={<a href=\"https://doi.org/10.1075/is.12.1.06fis\">10.1075/is.12.1.06fis</a>}, number={1}, journal={Interaction Studies}, publisher={John Benjamins Publishing Company}, author={Fischer, Kerstin and Foth, Kilian and Rohlfing, Katharina and Wrede, Britta}, year={2011}, pages={134–161} }","mla":"Fischer, Kerstin, et al. “Mindful Tutors: Linguistic Choice and Action Demonstration in Speech to Infants and a Simulated Robot.” <i>Interaction Studies</i>, vol. 12, no. 1, John Benjamins Publishing Company, 2011, pp. 134–61, doi:<a href=\"https://doi.org/10.1075/is.12.1.06fis\">10.1075/is.12.1.06fis</a>.","short":"K. Fischer, K. Foth, K. Rohlfing, B. Wrede, Interaction Studies 12 (2011) 134–161.","apa":"Fischer, K., Foth, K., Rohlfing, K., &#38; Wrede, B. (2011). Mindful tutors: Linguistic choice and action demonstration in speech to infants and a simulated robot. <i>Interaction Studies</i>, <i>12</i>(1), 134–161. <a href=\"https://doi.org/10.1075/is.12.1.06fis\">https://doi.org/10.1075/is.12.1.06fis</a>"},"volume":12,"author":[{"last_name":"Fischer","full_name":"Fischer, Kerstin","first_name":"Kerstin"},{"last_name":"Foth","full_name":"Foth, Kilian","first_name":"Kilian"},{"last_name":"Rohlfing","id":"50352","full_name":"Rohlfing, Katharina","first_name":"Katharina"},{"full_name":"Wrede, Britta","last_name":"Wrede","first_name":"Britta"}],"date_updated":"2023-02-01T12:56:04Z","doi":"10.1075/is.12.1.06fis","type":"journal_article","status":"public","department":[{"_id":"749"}],"user_id":"14931","_id":"17233"},{"status":"public","abstract":[{"text":"Robots interacting with humans need to understand actions and make use of language in social interactions. 
Research on infant development has shown that language helps the learner to structure visual observations of action. This acoustic information typically in the form of narration overlaps with action sequences and provides infants with a bottom-up guide to find structure within them. This concept has been introduced as acoustic packaging by Hirsh-Pasek and Golinkoff. We developed and integrated a prominence detection module in our acoustic packaging system to detect semantically relevant information linguistically highlighted by the tutor. Evaluation results on speech data from adult-infant interactions show a significant agreement with human raters. Furthermore a first approach based on acoustic packages which uses the prominence detection results to generate acoustic feedback is presented. Index Terms: prominence, multimodal action segmentation, human robot interaction, feedback","lang":"eng"}],"type":"conference","publication":"Interspeech 2011 (12th Annual Conference of the International Speech Communication Association)","language":[{"iso":"eng"}],"keyword":["Feedback","Human Robot Interaction","Prominence","Multimodal Action Segmentation"],"user_id":"14931","department":[{"_id":"749"}],"_id":"17244","citation":{"short":"L. Schillingmann, P. Wagner, C. Munier, B. Wrede, K. Rohlfing, in: Interspeech 2011 (12th Annual Conference of the International Speech Communication Association), 2011, pp. 3105–3108.","mla":"Schillingmann, Lars, et al. “Using Prominence Detection to Generate Acoustic Feedback in Tutoring Scenarios.” <i>Interspeech 2011 (12th Annual Conference of the International Speech Communication Association)</i>, 2011, pp. 
3105–08.","bibtex":"@inproceedings{Schillingmann_Wagner_Munier_Wrede_Rohlfing_2011, title={Using Prominence Detection to Generate Acoustic Feedback in Tutoring Scenarios}, booktitle={Interspeech 2011 (12th Annual Conference of the International Speech Communication Association)}, author={Schillingmann, Lars and Wagner, Petra and Munier, Christian and Wrede, Britta and Rohlfing, Katharina}, year={2011}, pages={3105–3108} }","apa":"Schillingmann, L., Wagner, P., Munier, C., Wrede, B., &#38; Rohlfing, K. (2011). Using Prominence Detection to Generate Acoustic Feedback in Tutoring Scenarios. <i>Interspeech 2011 (12th Annual Conference of the International Speech Communication Association)</i>, 3105–3108.","ieee":"L. Schillingmann, P. Wagner, C. Munier, B. Wrede, and K. Rohlfing, “Using Prominence Detection to Generate Acoustic Feedback in Tutoring Scenarios,” in <i>Interspeech 2011 (12th Annual Conference of the International Speech Communication Association)</i>, 2011, pp. 3105–3108.","chicago":"Schillingmann, Lars, Petra Wagner, Christian Munier, Britta Wrede, and Katharina Rohlfing. “Using Prominence Detection to Generate Acoustic Feedback in Tutoring Scenarios.” In <i>Interspeech 2011 (12th Annual Conference of the International Speech Communication Association)</i>, 3105–8, 2011.","ama":"Schillingmann L, Wagner P, Munier C, Wrede B, Rohlfing K. Using Prominence Detection to Generate Acoustic Feedback in Tutoring Scenarios. In: <i>Interspeech 2011 (12th Annual Conference of the International Speech Communication Association)</i>. 
; 2011:3105-3108."},"page":"3105-3108","year":"2011","title":"Using Prominence Detection to Generate Acoustic Feedback in Tutoring Scenarios","author":[{"first_name":"Lars","last_name":"Schillingmann","full_name":"Schillingmann, Lars"},{"first_name":"Petra","last_name":"Wagner","full_name":"Wagner, Petra"},{"last_name":"Munier","full_name":"Munier, Christian","first_name":"Christian"},{"last_name":"Wrede","full_name":"Wrede, Britta","first_name":"Britta"},{"first_name":"Katharina","last_name":"Rohlfing","full_name":"Rohlfing, Katharina","id":"50352"}],"date_created":"2020-06-24T13:02:10Z","date_updated":"2023-02-01T12:53:54Z"},{"status":"public","type":"conference","language":[{"iso":"eng"}],"keyword":["Prominence","Multimodal Action Segmentation","Feedback","Color Saliency","Human Robot Interaction"],"user_id":"14931","department":[{"_id":"749"}],"_id":"17245","citation":{"ama":"Schillingmann L, Wagner P, Munier C, Wrede B, Rohlfing K. Acoustic Packaging and the Learning of Words. In: ; 2011. doi:<a href=\"https://doi.org/10.3389/conf.fncom.2011.52.00020\">10.3389/conf.fncom.2011.52.00020</a>","ieee":"L. Schillingmann, P. Wagner, C. Munier, B. Wrede, and K. Rohlfing, “Acoustic Packaging and the Learning of Words,” 2011, doi: <a href=\"https://doi.org/10.3389/conf.fncom.2011.52.00020\">10.3389/conf.fncom.2011.52.00020</a>.","chicago":"Schillingmann, Lars, Petra Wagner, Christian Munier, Britta Wrede, and Katharina Rohlfing. “Acoustic Packaging and the Learning of Words,” 2011. 
<a href=\"https://doi.org/10.3389/conf.fncom.2011.52.00020\">https://doi.org/10.3389/conf.fncom.2011.52.00020</a>.","bibtex":"@inproceedings{Schillingmann_Wagner_Munier_Wrede_Rohlfing_2011, title={Acoustic Packaging and the Learning of Words}, DOI={<a href=\"https://doi.org/10.3389/conf.fncom.2011.52.00020\">10.3389/conf.fncom.2011.52.00020</a>}, author={Schillingmann, Lars and Wagner, Petra and Munier, Christian and Wrede, Britta and Rohlfing, Katharina}, year={2011} }","mla":"Schillingmann, Lars, et al. <i>Acoustic Packaging and the Learning of Words</i>. 2011, doi:<a href=\"https://doi.org/10.3389/conf.fncom.2011.52.00020\">10.3389/conf.fncom.2011.52.00020</a>.","short":"L. Schillingmann, P. Wagner, C. Munier, B. Wrede, K. Rohlfing, in: 2011.","apa":"Schillingmann, L., Wagner, P., Munier, C., Wrede, B., &#38; Rohlfing, K. (2011). <i>Acoustic Packaging and the Learning of Words</i>. <a href=\"https://doi.org/10.3389/conf.fncom.2011.52.00020\">https://doi.org/10.3389/conf.fncom.2011.52.00020</a>"},"year":"2011","publication_identifier":{"issn":["1662-5188"]},"doi":"10.3389/conf.fncom.2011.52.00020","title":"Acoustic Packaging and the Learning of Words","date_created":"2020-06-24T13:02:11Z","author":[{"first_name":"Lars","last_name":"Schillingmann","full_name":"Schillingmann, Lars"},{"first_name":"Petra","last_name":"Wagner","full_name":"Wagner, Petra"},{"first_name":"Christian","last_name":"Munier","full_name":"Munier, Christian"},{"last_name":"Wrede","full_name":"Wrede, Britta","first_name":"Britta"},{"first_name":"Katharina","last_name":"Rohlfing","full_name":"Rohlfing, Katharina","id":"50352"}],"date_updated":"2023-02-01T12:54:16Z"},{"citation":{"ieee":"A.-L. Vollmer <i>et al.</i>, “People modify their tutoring behavior in robot-directed interaction for action learning,” in <i>Development and Learning, 2009. ICDL 2009. IEEE 8th International Conference on Development and Learning</i>, 2009, pp. 
1–6, doi: <a href=\"https://doi.org/10.1109/DEVLRN.2009.5175516\">10.1109/DEVLRN.2009.5175516</a>.","chicago":"Vollmer, Anna-Lisa, Katrin Solveig Lohan, Kerstin Fischer, Yukie Nagai, Karola Pitsch, Jannik Fritsch, Katharina Rohlfing, and Britta Wrede. “People Modify Their Tutoring Behavior in Robot-Directed Interaction for Action Learning.” In <i>Development and Learning, 2009. ICDL 2009. IEEE 8th International Conference on Development and Learning</i>, 1–6. IEEE, 2009. <a href=\"https://doi.org/10.1109/DEVLRN.2009.5175516\">https://doi.org/10.1109/DEVLRN.2009.5175516</a>.","apa":"Vollmer, A.-L., Lohan, K. S., Fischer, K., Nagai, Y., Pitsch, K., Fritsch, J., Rohlfing, K., &#38; Wrede, B. (2009). People modify their tutoring behavior in robot-directed interaction for action learning. <i>Development and Learning, 2009. ICDL 2009. IEEE 8th International Conference on Development and Learning</i>, 1–6. <a href=\"https://doi.org/10.1109/DEVLRN.2009.5175516\">https://doi.org/10.1109/DEVLRN.2009.5175516</a>","ama":"Vollmer A-L, Lohan KS, Fischer K, et al. People modify their tutoring behavior in robot-directed interaction for action learning. In: <i>Development and Learning, 2009. ICDL 2009. IEEE 8th International Conference on Development and Learning</i>. IEEE; 2009:1-6. doi:<a href=\"https://doi.org/10.1109/DEVLRN.2009.5175516\">10.1109/DEVLRN.2009.5175516</a>","bibtex":"@inproceedings{Vollmer_Lohan_Fischer_Nagai_Pitsch_Fritsch_Rohlfing_Wrede_2009, title={People modify their tutoring behavior in robot-directed interaction for action learning}, DOI={<a href=\"https://doi.org/10.1109/DEVLRN.2009.5175516\">10.1109/DEVLRN.2009.5175516</a>}, booktitle={Development and Learning, 2009. ICDL 2009. 
IEEE 8th International Conference on Development and Learning}, publisher={IEEE}, author={Vollmer, Anna-Lisa and Lohan, Katrin Solveig and Fischer, Kerstin and Nagai, Yukie and Pitsch, Karola and Fritsch, Jannik and Rohlfing, Katharina and Wrede, Britta}, year={2009}, pages={1–6} }","short":"A.-L. Vollmer, K.S. Lohan, K. Fischer, Y. Nagai, K. Pitsch, J. Fritsch, K. Rohlfing, B. Wrede, in: Development and Learning, 2009. ICDL 2009. IEEE 8th International Conference on Development and Learning, IEEE, 2009, pp. 1–6.","mla":"Vollmer, Anna-Lisa, et al. “People Modify Their Tutoring Behavior in Robot-Directed Interaction for Action Learning.” <i>Development and Learning, 2009. ICDL 2009. IEEE 8th International Conference on Development and Learning</i>, IEEE, 2009, pp. 1–6, doi:<a href=\"https://doi.org/10.1109/DEVLRN.2009.5175516\">10.1109/DEVLRN.2009.5175516</a>."},"page":"1-6","year":"2009","doi":"10.1109/DEVLRN.2009.5175516","title":"People modify their tutoring behavior in robot-directed interaction for action learning","date_created":"2020-06-24T13:02:43Z","author":[{"first_name":"Anna-Lisa","full_name":"Vollmer, Anna-Lisa","last_name":"Vollmer"},{"first_name":"Katrin Solveig","last_name":"Lohan","full_name":"Lohan, Katrin Solveig"},{"first_name":"Kerstin","last_name":"Fischer","full_name":"Fischer, Kerstin"},{"full_name":"Nagai, Yukie","last_name":"Nagai","first_name":"Yukie"},{"full_name":"Pitsch, Karola","last_name":"Pitsch","first_name":"Karola"},{"last_name":"Fritsch","full_name":"Fritsch, Jannik","first_name":"Jannik"},{"first_name":"Katharina","last_name":"Rohlfing","id":"50352","full_name":"Rohlfing, Katharina"},{"first_name":"Britta","full_name":"Wrede, Britta","last_name":"Wrede"}],"publisher":"IEEE","date_updated":"2023-02-01T13:06:43Z","status":"public","abstract":[{"lang":"eng","text":"In developmental research, tutoring behavior has been identified as scaffolding infants' learning processes. 
It has been defined in terms of child-directed speech (Motherese), child-directed motion (Motionese), and contingency. In the field of developmental robotics, research often assumes that in human-robot interaction (HRI), robots are treated similar to infants, because their immature cognitive capabilities benefit from this behavior. However, according to our knowledge, it has barely been studied whether this is true and how exactly humans alter their behavior towards a robotic interaction partner. In this paper, we present results concerning the acceptance of a robotic agent in a social learning scenario obtained via comparison to adults and 8-11 months old infants in equal conditions. These results constitute an important empirical basis for making use of tutoring behavior in social robotics. In our study, we performed a detailed multimodal analysis of HRI in a tutoring situation using the example of a robot simulation equipped with a bottom-up saliency-based attention model. Our results reveal significant differences in hand movement velocity, motion pauses, range of motion, and eye gaze suggesting that for example adults decrease their hand movement velocity in an Adult-Child Interaction (ACI), opposed to an Adult-Adult Interaction (AAI) and this decrease is even higher in the Adult-Robot Interaction (ARI). We also found important differences between ACI and ARI in how the behavior is modified over time as the interaction unfolds. These findings indicate the necessity of integrating top-down feedback structures into a bottom-up system for robots to be fully accepted as interaction partners."}],"type":"conference","publication":"Development and Learning, 2009. ICDL 2009. 
IEEE 8th International Conference on Development and Learning","language":[{"iso":"eng"}],"keyword":["robot simulation","hand movement velocity","robotic interaction partner","robotic agent","robot-directed interaction","multimodal analysis","Motionese","Motherese","intelligent tutoring systems","immature cognitive capability","human computer interaction","eye gaze","child-directed speech","child-directed motion","bottom-up system","bottom-up saliency-based attention model","adult-robot interaction","adult-child interaction","adult-adult interaction","human-robot interaction","action learning","social learning scenario","social robotics","software agents","top-down feedback structures","tutoring behavior"],"user_id":"14931","department":[{"_id":"749"}],"_id":"17272"},{"status":"public","type":"conference","publication":"Proceedings of the 4th ACM/IEEE international conference on Human robot interaction - HRI '09","keyword":["SINA","human robot interaction","biron"],"language":[{"iso":"eng"}],"_id":"17267","user_id":"14931","department":[{"_id":"749"}],"year":"2009","citation":{"chicago":"Lohse, Manja, Marc Hanheide, Katharina Rohlfing, and Gerhard Sagerer. “Systemic Interaction Analysis (SInA) in HRI.” In <i>Proceedings of the 4th ACM/IEEE International Conference on Human Robot Interaction - HRI ’09</i>, 93–100, 2009. <a href=\"https://doi.org/10.1145/1514095.1514114\">https://doi.org/10.1145/1514095.1514114</a>.","ieee":"M. Lohse, M. Hanheide, K. Rohlfing, and G. Sagerer, “Systemic interaction analysis (SInA) in HRI,” in <i>Proceedings of the 4th ACM/IEEE international conference on Human robot interaction - HRI ’09</i>, 2009, pp. 93–100, doi: <a href=\"https://doi.org/10.1145/1514095.1514114\">10.1145/1514095.1514114</a>.","ama":"Lohse M, Hanheide M, Rohlfing K, Sagerer G. Systemic interaction analysis (SInA) in HRI. In: <i>Proceedings of the 4th ACM/IEEE International Conference on Human Robot Interaction - HRI ’09</i>. ; 2009:93-100. 
doi:<a href=\"https://doi.org/10.1145/1514095.1514114\">10.1145/1514095.1514114</a>","apa":"Lohse, M., Hanheide, M., Rohlfing, K., &#38; Sagerer, G. (2009). Systemic interaction analysis (SInA) in HRI. <i>Proceedings of the 4th ACM/IEEE International Conference on Human Robot Interaction - HRI ’09</i>, 93–100. <a href=\"https://doi.org/10.1145/1514095.1514114\">https://doi.org/10.1145/1514095.1514114</a>","mla":"Lohse, Manja, et al. “Systemic Interaction Analysis (SInA) in HRI.” <i>Proceedings of the 4th ACM/IEEE International Conference on Human Robot Interaction - HRI ’09</i>, 2009, pp. 93–100, doi:<a href=\"https://doi.org/10.1145/1514095.1514114\">10.1145/1514095.1514114</a>.","short":"M. Lohse, M. Hanheide, K. Rohlfing, G. Sagerer, in: Proceedings of the 4th ACM/IEEE International Conference on Human Robot Interaction - HRI ’09, 2009, pp. 93–100.","bibtex":"@inproceedings{Lohse_Hanheide_Rohlfing_Sagerer_2009, title={Systemic interaction analysis (SInA) in HRI}, DOI={<a href=\"https://doi.org/10.1145/1514095.1514114\">10.1145/1514095.1514114</a>}, booktitle={Proceedings of the 4th ACM/IEEE international conference on Human robot interaction - HRI ’09}, author={Lohse, Manja and Hanheide, Marc and Rohlfing, Katharina and Sagerer, Gerhard}, year={2009}, pages={93–100} }"},"page":"93-100","title":"Systemic interaction analysis (SInA) in HRI","doi":"10.1145/1514095.1514114","date_updated":"2023-02-01T13:05:01Z","author":[{"first_name":"Manja","last_name":"Lohse","full_name":"Lohse, Manja"},{"first_name":"Marc","full_name":"Hanheide, Marc","last_name":"Hanheide"},{"first_name":"Katharina","last_name":"Rohlfing","full_name":"Rohlfing, Katharina","id":"50352"},{"first_name":"Gerhard","last_name":"Sagerer","full_name":"Sagerer, Gerhard"}],"date_created":"2020-06-24T13:02:37Z"},{"user_id":"14931","department":[{"_id":"749"}],"_id":"17278","language":[{"iso":"eng"}],"keyword":["discursive behavior","autonomous robot","BIRON","man-machine systems","robot abilities","robot 
knowledge","user gestures","robot verbal feedback utterance","speech processing","user verbal behavior","service robots","human-robot interaction","human computer interaction","gesture recognition"],"type":"conference","status":"public","abstract":[{"lang":"eng","text":"This paper investigates the influence of feedback provided by an autonomous robot (BIRON) on users’ discursive behavior. A user study is described during which users show objects to the robot. The results of the experiment indicate that the robot’s verbal feedback utterances cause the humans to adapt their own way of speaking. The changes in users’ verbal behavior are due to their beliefs about the robot’s knowledge and abilities. In this paper they are identified and grouped. Moreover, the data implies variations in user behavior regarding gestures. Unlike speech, the robot was not able to give feedback with gestures. Due to the lack of feedback, users did not seem to have a consistent mental representation of the robot’s abilities to recognize gestures. As a result, changes between different gestures are interpreted to be unconscious variations accompanying speech."}],"date_created":"2020-06-24T13:02:49Z","author":[{"first_name":"Manja","last_name":"Lohse","full_name":"Lohse, Manja"},{"last_name":"Rohlfing","id":"50352","full_name":"Rohlfing, Katharina","first_name":"Katharina"},{"last_name":"Wrede","full_name":"Wrede, Britta","first_name":"Britta"},{"first_name":"Gerhard","last_name":"Sagerer","full_name":"Sagerer, Gerhard"}],"date_updated":"2023-02-01T13:08:20Z","doi":"10.1109/ROBOT.2008.4543743","title":"“Try something else!” — When users change their discursive behavior in human-robot interaction","publication_identifier":{"isbn":["1050-4729"]},"citation":{"apa":"Lohse, M., Rohlfing, K., Wrede, B., &#38; Sagerer, G. (2008). <i>“Try something else!” — When users change their discursive behavior in human-robot interaction</i>. 3481–3486. 
<a href=\"https://doi.org/10.1109/ROBOT.2008.4543743\">https://doi.org/10.1109/ROBOT.2008.4543743</a>","short":"M. Lohse, K. Rohlfing, B. Wrede, G. Sagerer, in: 2008, pp. 3481–3486.","bibtex":"@inproceedings{Lohse_Rohlfing_Wrede_Sagerer_2008, title={“Try something else!” — When users change their discursive behavior in human-robot interaction}, DOI={<a href=\"https://doi.org/10.1109/ROBOT.2008.4543743\">10.1109/ROBOT.2008.4543743</a>}, author={Lohse, Manja and Rohlfing, Katharina and Wrede, Britta and Sagerer, Gerhard}, year={2008}, pages={3481–3486} }","mla":"Lohse, Manja, et al. <i>“Try Something Else!” — When Users Change Their Discursive Behavior in Human-Robot Interaction</i>. 2008, pp. 3481–86, doi:<a href=\"https://doi.org/10.1109/ROBOT.2008.4543743\">10.1109/ROBOT.2008.4543743</a>.","ama":"Lohse M, Rohlfing K, Wrede B, Sagerer G. “Try something else!” — When users change their discursive behavior in human-robot interaction. In: ; 2008:3481-3486. doi:<a href=\"https://doi.org/10.1109/ROBOT.2008.4543743\">10.1109/ROBOT.2008.4543743</a>","chicago":"Lohse, Manja, Katharina Rohlfing, Britta Wrede, and Gerhard Sagerer. “‘Try Something Else!’ — When Users Change Their Discursive Behavior in Human-Robot Interaction,” 3481–86, 2008. <a href=\"https://doi.org/10.1109/ROBOT.2008.4543743\">https://doi.org/10.1109/ROBOT.2008.4543743</a>.","ieee":"M. Lohse, K. Rohlfing, B. Wrede, and G. Sagerer, “‘Try something else!’ — When users change their discursive behavior in human-robot interaction,” 2008, pp. 3481–3486, doi: <a href=\"https://doi.org/10.1109/ROBOT.2008.4543743\">10.1109/ROBOT.2008.4543743</a>."},"page":"3481-3486","year":"2008"}]
