@inproceedings{17272,
  abstract  = {{In developmental research, tutoring behavior has been identified as scaffolding infants' learning processes. It has been defined in terms of child-directed speech (Motherese), child-directed motion (Motionese), and contingency. In the field of developmental robotics, research often assumes that in human-robot interaction (HRI), robots are treated similarly to infants, because their immature cognitive capabilities benefit from this behavior. However, to our knowledge, it has barely been studied whether this is true and how exactly humans alter their behavior towards a robotic interaction partner. In this paper, we present results concerning the acceptance of a robotic agent in a social learning scenario, obtained via comparison to adults and 8- to 11-month-old infants under equal conditions. These results constitute an important empirical basis for making use of tutoring behavior in social robotics. In our study, we performed a detailed multimodal analysis of HRI in a tutoring situation, using the example of a robot simulation equipped with a bottom-up saliency-based attention model. Our results reveal significant differences in hand movement velocity, motion pauses, range of motion, and eye gaze, suggesting, for example, that adults decrease their hand movement velocity in Adult-Child Interaction (ACI) as opposed to Adult-Adult Interaction (AAI), and that this decrease is even greater in Adult-Robot Interaction (ARI). We also found important differences between ACI and ARI in how the behavior is modified over time as the interaction unfolds. These findings indicate the necessity of integrating top-down feedback structures into a bottom-up system for robots to be fully accepted as interaction partners.}},
  author    = {{Vollmer, Anna-Lisa and Lohan, Katrin Solveig and Fischer, Kerstin and Nagai, Yukie and Pitsch, Karola and Fritsch, Jannik and Rohlfing, Katharina and Wrede, Britta}},
  booktitle = {{2009 IEEE 8th International Conference on Development and Learning (ICDL 2009)}},
  keywords  = {{robot simulation, hand movement velocity, robotic interaction partner, robotic agent, robot-directed interaction, multimodal analysis, Motionese, Motherese, intelligent tutoring systems, immature cognitive capability, human computer interaction, eye gaze, child-directed speech, child-directed motion, bottom-up system, bottom-up saliency-based attention model, adult-robot interaction, adult-child interaction, adult-adult interaction, human-robot interaction, action learning, social learning scenario, social robotics, software agents, top-down feedback structures, tutoring behavior}},
  pages     = {{1--6}},
  publisher = {{IEEE}},
  title     = {{{People modify their tutoring behavior in robot-directed interaction for action learning}}},
  doi       = {{10.1109/DEVLRN.2009.5175516}},
  year      = {{2009}},
}