@inproceedings{17244,
  author       = {Schillingmann, Lars and Wagner, Petra and Munier, Christian and Wrede, Britta and Rohlfing, Katharina},
  title        = {Using Prominence Detection to Generate Acoustic Feedback in Tutoring Scenarios},
  booktitle    = {{Interspeech} 2011 (12th Annual Conference of the International Speech Communication Association)},
  pages        = {3105--3108},
  year         = {2011},
  keywords     = {Feedback, Human Robot Interaction, Prominence, Multimodal Action Segmentation},
  abstract     = {Robots interacting with humans need to understand actions and make use of language in social interactions. Research on infant development has shown that language helps the learner to structure visual observations of action. This acoustic information typically in the form of narration overlaps with action sequences and provides infants with a bottom-up guide to find structure within them. This concept has been introduced as acoustic packaging by Hirsh-Pasek and Golinkoff. We developed and integrated a prominence detection module in our acoustic packaging system to detect semantically relevant information linguistically highlighted by the tutor. Evaluation results on speech data from adult-infant interactions show a significant agreement with human raters. Furthermore a first approach based on acoustic packages which uses the prominence detection results to generate acoustic feedback is presented. Index Terms: prominence, multimodal action segmentation, human robot interaction, feedback},
}

@inproceedings{17245,
  author       = {Schillingmann, Lars and Wagner, Petra and Munier, Christian and Wrede, Britta and Rohlfing, Katharina},
  title        = {Acoustic Packaging and the Learning of Words},
  doi          = {10.3389/conf.fncom.2011.52.00020},
  issn         = {1662-5188},
  year         = {2011},
  keywords     = {Prominence, Multimodal Action Segmentation, Feedback, Color Saliency, Human Robot Interaction},
  internal-note = {NOTE(review): @inproceedings requires a booktitle, which is missing here -- verify the venue via the DOI and add it},
}

