@article{17289,
  abstract     = {{Robots have to deal with an enormous amount of sensory stimuli. One way of making sense of them is to enable a robot system to actively search for cues that help structure the information. Studies with infants reveal that parents support the learning process by modifying their interaction style, depending on their child's developmental age. In our study, in which parents demonstrated everyday actions to their preverbal children (8-11 months old), our aim was to identify objective parameters for multimodal action modification. Our results reveal two action parameters that are modified in adult-child interaction: roundness and pace. Furthermore, we found that language helps children structure action sequences through synchrony and emphasis. These insights are discussed with respect to the built-in attention architecture of a socially interactive robot, which enables it to understand demonstrated actions. Our algorithmic approach to automatically detecting the task structure in child-designed input demonstrates the potential impact of insights from developmental learning on robotics. The presented findings pave the way towards automatically detecting when to imitate in a demonstration.}},
  author       = {{Rohlfing, Katharina and Fritsch, Jannik and Wrede, Britta and Jungmann, Tanja}},
  issn         = {{1568-5535}},
  journal      = {{Advanced Robotics}},
  keywords     = {{multi-modal motherese, child-directed input, motionese, learning mechanisms}},
  number       = {{10}},
  pages        = {{1183--1199}},
  publisher    = {{VSP BV}},
  title        = {{{How can multimodal cues from child-directed interaction reduce learning complexity in robots?}}},
  doi          = {{10.1163/156855306778522532}},
  volume       = {{20}},
  year         = {{2006}},
}

