@article{17192,
  author       = {Lohan, Katrin S. and Griffiths, Sascha and Sciutti, Alessandra and Partmann, Tim C. and Rohlfing, Katharina},
  title        = {Co-development of manner and path concepts in language, action, and eye-gaze behavior},
  journal      = {Topics in Cognitive Science},
  year         = {2014},
  volume       = {6},
  number       = {3},
  pages        = {492--512},
  publisher    = {Wiley-Blackwell},
  issn         = {1756-8757},
  doi          = {10.1111/tops.12098},
  keywords     = {Imitation, Tutoring, Adult-child interaction, Human-robot interaction, Semantics, Teachable robots},
  abstract     = {In order for artificial intelligent systems to interact naturally with human users, they need to be able to learn from human instructions when actions should be imitated. Human tutoring will typically consist of action demonstrations accompanied by speech. In the following, the characteristics of human tutoring during action demonstration will be examined. A special focus will be put on the distinction between two kinds of motion events: path-oriented actions and manner-oriented actions. Such a distinction is inspired by the literature pertaining to cognitive linguistics, which indicates that the human conceptual system can distinguish these two distinct types of motion. These two kinds of actions are described in language by more path-oriented or more manner-oriented utterances. In path-oriented utterances, the source, trajectory, or goal is emphasized, whereas in manner-oriented utterances the medium, velocity, or means of motion are highlighted. We examined a video corpus of adult-child interactions comprised of three age groups of children---pre-lexical, early lexical, and lexical---and two different tasks, one emphasizing manner more strongly and one emphasizing path more strongly. We analyzed the language and motion of the caregiver and the gazing behavior of the child to highlight the differences between the tutoring and the acquisition of the manner and path concepts. The results suggest that age is an important factor in the development of these action categories. The analysis of this corpus has also been exploited to develop an intelligent robotic behavior---the tutoring spotter system---able to emulate children's behaviors in a tutoring situation, with the aim of evoking in human subjects a natural and effective behavior in teaching to a robot. The findings related to the development of manner and path concepts have been used to implement new effective feedback strategies in the tutoring spotter system, which should provide improvements in human-robot interaction.},
}

@article{17199,
  author       = {Pitsch, Karola and Vollmer, Anna-Lisa and Rohlfing, Katharina and Fritsch, Jannik and Wrede, Britta},
  title        = {Tutoring in adult-child-interaction: On the loop of the tutor's action modification and the recipient's gaze},
  journal      = {Interaction Studies},
  year         = {2014},
  volume       = {15},
  number       = {1},
  pages        = {55--98},
  publisher    = {John Benjamins Publishing Company},
  issn         = {1572-0381},
  doi          = {10.1075/is.15.1.03pit},
  keywords     = {conversation analysis, interactional coordination, adult-child-interaction, feedback, gaze, quantification, social learning, motionese, tutoring},
  abstract     = {Research of tutoring in parent-infant interaction has shown that tutors---when presenting some action---modify both their verbal and manual performance for the learner (`motherese', `motionese'). Investigating the sources and effects of the tutors' action modifications, we suggest an interactional account of `motionese'. Using video-data from a semi-experimental study in which parents taught their 8 to 11 month old infants how to nest a set of differently sized cups, we found that the tutors' action modifications (in particular: high arches) functioned as an orienting device to guide the infant's visual attention (gaze). Action modification and the recipient's gaze can be seen to have a reciprocal sequential relationship and to constitute a constant loop of mutual adjustments. Implications are discussed for developmental research and for robotic `Social Learning'. We argue that a robot system could use on-line feedback strategies (e.g. gaze) to pro-actively shape a tutor's action presentation as it emerges.},
}

@inproceedings{17253,
  author       = {Vollmer, Anna-Lisa and Pitsch, Karola and Lohan, Katrin Solveig and Fritsch, Jannik and Rohlfing, Katharina and Wrede, Britta},
  title        = {Developing feedback: How children of different age contribute to a tutoring interaction with adults},
  booktitle    = {2010 {IEEE} 9th International Conference on Development and Learning ({ICDL})},
  year         = {2010},
  pages        = {76--81},
  keywords     = {tutoring interaction, social interaction, video signal processing, robot systems, paediatrics, neurophysiology, Learning, infant, feedback, biology computing, cognitive capabilities, cognition, children},
}

@inproceedings{17257,
  author       = {Lohan, Katrin Solveig and Rohlfing, Katharina and Wrede, Britta},
  title        = {Analysing the effect of contingency in tutoring situations},
  year         = {2009},
  keywords     = {eye gaze, tutoring situations, contingency},
  abstract     = {In developmental research, tutoring behavior has been identified as scaffolding infants' learning processes. It has been defined in terms of child-directed speech (Motherese), child-directed motion (Motionese), and contingency. Contingency describes situations in which two agents socially interact with each other and Csibra and Gergely showed that contingency is a characteristic aspect of social interaction [3]. In the field of developmental robotics, research often assumes that in human-robot interaction (HRI), robots are treated similar to infants, because their immature cognitive capabilities benefit from this behavior. Here we present results concerning the acceptance of a robotic agent in a social learning scenario obtained via comparison to adults and 8-11 months old infants in equal conditions. These results constitute an important empirical basis for making use of tutoring behavior in social robotics. Our results reveal significant differences between Adult-Child Interaction (ACI), Adult-Adult Interaction (AAI) and Adult-Robot Interaction (ARI) in eye gaze behavior suggesting that contingency is impaired in the analyzed ARI situation.},
  internal-note = {booktitle is missing but required for @inproceedings -- TODO locate the proceedings title for this 2009 paper},
}

@inproceedings{17272,
  author       = {Vollmer, Anna-Lisa and Lohan, Katrin Solveig and Fischer, Kerstin and Nagai, Yukie and Pitsch, Karola and Fritsch, Jannik and Rohlfing, Katharina and Wrede, Britta},
  title        = {People modify their tutoring behavior in robot-directed interaction for action learning},
  booktitle    = {2009 {IEEE} 8th International Conference on Development and Learning ({ICDL})},
  year         = {2009},
  pages        = {1--6},
  publisher    = {IEEE},
  doi          = {10.1109/DEVLRN.2009.5175516},
  keywords     = {robot simulation, hand movement velocity, robotic interaction partner, robotic agent, robot-directed interaction, multimodal analysis, Motionese, Motherese, intelligent tutoring systems, immature cognitive capability, human computer interaction, eye gaze, child-directed speech, child-directed motion, bottom-up system, bottom-up saliency-based attention model, adult-robot interaction, adult-child interaction, adult-adult interaction, human-robot interaction, action learning, social learning scenario, social robotics, software agents, top-down feedback structures, tutoring behavior},
  abstract     = {In developmental research, tutoring behavior has been identified as scaffolding infants' learning processes. It has been defined in terms of child-directed speech (Motherese), child-directed motion (Motionese), and contingency. In the field of developmental robotics, research often assumes that in human-robot interaction (HRI), robots are treated similar to infants, because their immature cognitive capabilities benefit from this behavior. However, according to our knowledge, it has barely been studied whether this is true and how exactly humans alter their behavior towards a robotic interaction partner. In this paper, we present results concerning the acceptance of a robotic agent in a social learning scenario obtained via comparison to adults and 8-11 months old infants in equal conditions. These results constitute an important empirical basis for making use of tutoring behavior in social robotics. In our study, we performed a detailed multimodal analysis of HRI in a tutoring situation using the example of a robot simulation equipped with a bottom-up saliency-based attention model. Our results reveal significant differences in hand movement velocity, motion pauses, range of motion, and eye gaze suggesting that for example adults decrease their hand movement velocity in an Adult-Child Interaction (ACI), opposed to an Adult-Adult Interaction (AAI) and this decrease is even higher in the Adult-Robot Interaction (ARI). We also found important differences between ACI and ARI in how the behavior is modified over time as the interaction unfolds. These findings indicate the necessity of integrating top-down feedback structures into a bottom-up system for robots to be fully accepted as interaction partners.},
}

@inproceedings{4496,
  author       = {Heinze, Nina and Sporer, Thomas and Jenert, Tobias},
  title        = {Semivirtuelle {Lernumgebung} zum wissenschaftlichen {Arbeiten} als {Ergänzung} des {Studienangebots} der {Universität Augsburg}},
  booktitle    = {Deutsche Gesellschaft für Informationswissenschaft und Informationspraxis: Tagungen der Deutschen Gesellschaft für Informationswissenschaft und Informationspraxis},
  year         = {2007},
  volume       = {9},
  pages        = {319--328},
  location     = {Frankfurt am Main},
  isbn         = {978-3-925474-60-6},
  keywords     = {Wissenschaftliches Arbeiten, Blended Learning, Tutoring},
}

