@incollection{17206,
  author    = {Wrede, Britta and Schillingmann, Lars and Rohlfing, Katharina},
  title     = {Making Use of Multi-Modal Synchrony: A Model of Acoustic Packaging to Tie Words to Actions},
  booktitle = {Theoretical and Computational Models of Word Learning: Trends in Psychology and Artificial Intelligence},
  editor    = {Gogate, Lakshmi and Hollich, George},
  publisher = {IGI Global},
  pages     = {224--240},
  year      = {2013},
  isbn      = {1466629738},
  doi       = {10.4018/978-1-4666-2973-8.ch010},
  abstract  = {If they are to learn and interact with humans, robots need to understand actions and make use of language in social interactions. Hirsh-Pasek and Golinkoff (1996) have emphasized the use of language to learn actions when introducing the idea of acoustic packaging in human development. This idea suggests that acoustic information, typically in the form of narration, overlaps with action sequences, thereby providing infants with a bottom-up guide to attend to relevant parts and to find structure within them. The authors developed a computational model of the multimodal interplay of action and language in tutoring situations. This chapter presents the results of applying this model to multimodal parent-infant interaction data. Results are twofold and indicate that (a) infant-directed interaction is more structured than adult-directed interaction in that it contains more packages, and these packages have fewer motion segments; and (b) the synchronous structure within infant-directed packages contains redundant information making it possible to solve the reference problem when tying color adjectives to a moving object.},
}