@article{17192,
  author    = {Lohan, Katrin S. and Griffiths, Sascha and Sciutti, Alessandra and Partmann, Tim C. and Rohlfing, Katharina},
  title     = {Co-development of Manner and Path Concepts in Language, Action, and Eye-Gaze Behavior},
  journal   = {Topics in Cognitive Science},
  volume    = {6},
  number    = {3},
  pages     = {492--512},
  year      = {2014},
  publisher = {Wiley-Blackwell},
  issn      = {1756-8757},
  doi       = {10.1111/tops.12098},
  keywords  = {Imitation, Tutoring, Adult-child interaction, Human-robot interaction, Semantics, Teachable robots},
  abstract  = {In order for artificial intelligent systems to interact naturally with human users, they need to be able to learn from human instructions when actions should be imitated. Human tutoring will typically consist of action demonstrations accompanied by speech. In the following, the characteristics of human tutoring during action demonstration will be examined. A special focus will be put on the distinction between two kinds of motion events: path-oriented actions and manner-oriented actions. Such a distinction is inspired by the literature pertaining to cognitive linguistics, which indicates that the human conceptual system can distinguish these two distinct types of motion. These two kinds of actions are described in language by more path-oriented or more manner-oriented utterances. In path-oriented utterances, the source, trajectory, or goal is emphasized, whereas in manner-oriented utterances the medium, velocity, or means of motion are highlighted. We examined a video corpus of adult-child interactions comprised of three age groups of children---pre-lexical, early lexical, and lexical---and two different tasks, one emphasizing manner more strongly and one emphasizing path more strongly. We analyzed the language and motion of the caregiver and the gazing behavior of the child to highlight the differences between the tutoring and the acquisition of the manner and path concepts. The results suggest that age is an important factor in the development of these action categories. The analysis of this corpus has also been exploited to develop an intelligent robotic behavior---the tutoring spotter system---able to emulate children's behaviors in a tutoring situation, with the aim of evoking in human subjects a natural and effective behavior in teaching to a robot. The findings related to the development of manner and path concepts have been used to implement new effective feedback strategies in the tutoring spotter system, which should provide improvements in human-robot interaction.},
}

@article{17199,
  author    = {Pitsch, Karola and Vollmer, Anna-Lisa and Rohlfing, Katharina and Fritsch, Jannik and Wrede, Britta},
  title     = {Tutoring in Adult-Child-Interaction: On the Loop of the Tutor's Action Modification and the Recipient's Gaze},
  journal   = {Interaction Studies},
  volume    = {15},
  number    = {1},
  pages     = {55--98},
  year      = {2014},
  publisher = {John Benjamins Publishing Company},
  issn      = {1572-0381},
  doi       = {10.1075/is.15.1.03pit},
  keywords  = {conversation analysis, interactional coordination, adult-child-interaction, feedback, gaze, quantification, social learning, motionese, tutoring},
  abstract  = {Research of tutoring in parent-infant interaction has shown that tutors---when presenting some action---modify both their verbal and manual performance for the learner (`motherese', `motionese'). Investigating the sources and effects of the tutors' action modifications, we suggest an interactional account of `motionese'. Using video-data from a semi-experimental study in which parents taught their 8 to 11 month old infants how to nest a set of differently sized cups, we found that the tutors' action modifications (in particular: high arches) functioned as an orienting device to guide the infant's visual attention (gaze). Action modification and the recipient's gaze can be seen to have a reciprocal sequential relationship and to constitute a constant loop of mutual adjustments. Implications are discussed for developmental research and for robotic `Social Learning'. We argue that a robot system could use on-line feedback strategies (e.g. gaze) to pro-actively shape a tutor's action presentation as it emerges.},
}

@inproceedings{17259,
  author    = {Pitsch, Karola and Vollmer, Anna-Lisa and Fritsch, Jannik and Wrede, Britta and Rohlfing, Katharina and Sagerer, Gerhard},
  title     = {On the Loop of Action Modification and the Recipient's Gaze in Adult-Child Interaction},
  booktitle = {Gesture and Speech in Interaction},
  year      = {2009},
  keywords  = {gaze, gesture, Multimodal, adult-child interaction},
  abstract  = {Learning is a social endeavor, in which the learner generally receives support from his/her social partner(s). In developmental research---even though tutors/adults behavior modifications in their speech, gestures and motions have been extensively studied, studies barely consider the recipient's (i.e. the child's) perspective in the analysis of the adult's presentation. In addition, the variability in parental behavior, i.e. the fact that not every parent modifies her/his behavior in the same way, found less fine-grained analysis. In contrast, in this paper, we assume an interactional perspective investigating the loop between the tutor's and the learner's actions. With this approach, we aim both at discovering the levels and features of variability and at achieving a better understanding of how they come about within the course of the interaction. For our analysis, we used a combination of (1) qualitative investigation derived from ethnomethodological Conversation Analysis (CA), (2) semi-automatic computational 2D hand tracking and (3) a mathematically based visualization of the data. Our analysis reveals that tutors not only shape their demonstrations differently with regard to the intended recipient per se (adult-directed vs. child-directed), but most importantly that the learner's feedback during the presentation is consequential for the concrete ways in which the presentation is carried out.},
}

@inproceedings{17272,
  author    = {Vollmer, Anna-Lisa and Lohan, Katrin Solveig and Fischer, Kerstin and Nagai, Yukie and Pitsch, Karola and Fritsch, Jannik and Rohlfing, Katharina and Wrede, Britta},
  title     = {People Modify Their Tutoring Behavior in Robot-Directed Interaction for Action Learning},
  booktitle = {{IEEE} 8th International Conference on Development and Learning ({ICDL} 2009)},
  pages     = {1--6},
  year      = {2009},
  publisher = {IEEE},
  doi       = {10.1109/DEVLRN.2009.5175516},
  keywords  = {robot simulation, hand movement velocity, robotic interaction partner, robotic agent, robot-directed interaction, multimodal analysis, Motionese, Motherese, intelligent tutoring systems, immature cognitive capability, human computer interaction, eye gaze, child-directed speech, child-directed motion, bottom-up system, bottom-up saliency-based attention model, adult-robot interaction, adult-child interaction, adult-adult interaction, human-robot interaction, action learning, social learning scenario, social robotics, software agents, top-down feedback structures, tutoring behavior},
  abstract  = {In developmental research, tutoring behavior has been identified as scaffolding infants' learning processes. It has been defined in terms of child-directed speech (Motherese), child-directed motion (Motionese), and contingency. In the field of developmental robotics, research often assumes that in human-robot interaction (HRI), robots are treated similar to infants, because their immature cognitive capabilities benefit from this behavior. However, according to our knowledge, it has barely been studied whether this is true and how exactly humans alter their behavior towards a robotic interaction partner. In this paper, we present results concerning the acceptance of a robotic agent in a social learning scenario obtained via comparison to adults and 8-11 months old infants in equal conditions. These results constitute an important empirical basis for making use of tutoring behavior in social robotics. In our study, we performed a detailed multimodal analysis of HRI in a tutoring situation using the example of a robot simulation equipped with a bottom-up saliency-based attention model. Our results reveal significant differences in hand movement velocity, motion pauses, range of motion, and eye gaze suggesting that for example adults decrease their hand movement velocity in an Adult-Child Interaction (ACI), opposed to an Adult-Adult Interaction (AAI) and this decrease is even higher in the Adult-Robot Interaction (ARI). We also found important differences between ACI and ARI in how the behavior is modified over time as the interaction unfolds. These findings indicate the necessity of integrating top-down feedback structures into a bottom-up system for robots to be fully accepted as interaction partners.},
}