@comment{NOTE(review): every field value in this file is double-braced
  (auto-export style). Double-bracing an author field makes BibTeX/Biber
  treat the whole list as a single literal name (the inner braces hide the
  " and " separators), and double/triple-braced titles suppress style-applied
  casing. Consider normalising to single braces with per-word protection.}
@book{17207,
  author       = {{Rohlfing, Katharina}},
  isbn         = {{978-3823367727}},
  pages        = {{215}},
  publisher    = {{Gunter Narr Verlag}},
  title        = {{{Frühkindliche Semantik. Eine Einführung}}},
  year         = {{2013}},
}

@inproceedings{17209,
  author       = {{Rohlfing, Katharina and Grimminger, Angela and Nachtigäller, Kerstin}},
  title        = {{{Which Semantic Synchrony?}}},
  year         = {{2013}},
}

@article{17210,
  author       = {{Rączaszek-Leonardi, Joanna and Nomikou, Iris and Rohlfing, Katharina}},
  issn         = {{1943-0612}},
  journal      = {{IEEE Transactions on Autonomous Mental Development}},
  number       = {{3}},
  pages        = {{210--221}},
  publisher    = {{Institute of Electrical \& Electronics Engineers (IEEE)}},
  title        = {{{Young children's dialogical actions: The beginnings of purposeful intersubjectivity}}},
  doi          = {{10.1109/TAMD.2013.2273258}},
  volume       = {{5}},
  year         = {{2013}},
}

@article{17214,
  author       = {{Rohlfing, Katharina and Nomikou, Iris and Pitsch, Karola}},
  issn         = {{1572-0373}},
  journal      = {{Interaction Studies}},
  number       = {{2}},
  publisher    = {{John Benjamins Publishing Company}},
  title        = {{{Asymmetry and adaptation in social interaction: A micro-analytic perspective}}},
  doi          = {{10.1075/is.14.2.001nom}},
  volume       = {{14}},
  year         = {{2013}},
}

@article{17213,
  author       = {{von Lehmden, Friederike and Kauffeldt, Johanna and Belke, Eva and Rohlfing, Katharina}},
  issn         = {{2193-9152}},
  journal      = {{Praxis Sprache}},
  pages        = {{18--27}},
  publisher    = {{Schulz-Kirchner Verlag}},
  title        = {{{Das Vorlesen von Kinderbüchern als implizites Mittel zur Sprachförderung im Bereich Grammatik}}},
  volume       = {{1}},
  year         = {{2013}},
}

@inproceedings{17212,
  author       = {{Rohlfing, Katharina and Salas Poblete, Juana and Joublin, Frank}},
  issn         = {{2308-2275}},
  title        = {{{Learning new words in unfamiliar frames from direct and indirect teaching}}},
  year         = {{2013}},
}

@inproceedings{17211,
  author       = {{Rączaszek-Leonardi, Joanna and Nomikou, Iris and Rohlfing, Katharina}},
  title        = {{{The Development of Purposeful Intersubjectivity}}},
  year         = {{2013}},
}

@article{17203,
  author       = {{Salem, Maha and Eyssel, Friederike Anne and Rohlfing, Katharina and Kopp, Stefan and Joublin, Frank}},
  issn         = {{1875-4805}},
  journal      = {{International Journal of Social Robotics}},
  number       = {{3}},
  pages        = {{313--323}},
  publisher    = {{Springer}},
  title        = {{{To err is human (-like): Effects of robot gesture on perceived anthropomorphism and likeability}}},
  doi          = {{10.1007/s12369-013-0196-9}},
  volume       = {{5}},
  year         = {{2013}},
}

@article{17215,
  author       = {{Schilling, Malte and Rohlfing, Katharina and Cruse, Holk}},
  issn         = {{1664-1078}},
  journal      = {{Frontiers in Psychology}},
  pages        = {{405}},
  publisher    = {{Frontiers Media SA}},
  title        = {{{Prediction as internal simulation: Taking chances in what to do next}}},
  doi          = {{10.3389/fpsyg.2012.00405}},
  volume       = {{3}},
  year         = {{2012}},
}

@article{17225,
  abstract     = {{How is communicative gesture behavior in robots perceived by humans? Although gesture is crucial in social interaction, this research question is still largely unexplored in the field of social robotics. Thus, the main objective of the present work is to investigate how gestural machine behaviors can be used to design more natural communication in social robots. The chosen approach is twofold. Firstly, the technical challenges encountered when implementing a speech-gesture generation model on a robotic platform are tackled. We present a framework that enables the humanoid robot to flexibly produce synthetic speech and co-verbal hand and arm gestures at run-time, while not being limited to a predefined repertoire of motor actions. Secondly, the achieved flexibility in robot gesture is exploited in controlled experiments. To gain a deeper understanding of how communicative robot gesture might impact and shape human perception and evaluation of human-robot interaction, we conducted a between-subjects experimental study using the humanoid robot in a joint task scenario. We manipulated the non-verbal behaviors of the robot in three experimental conditions, so that it would refer to objects by utilizing either (1) unimodal (i.e., speech only) utterances, (2) congruent multimodal (i.e., semantically matching speech and gesture) or (3) incongruent multimodal (i.e., semantically non-matching speech and gesture) utterances. Our findings reveal that the robot is evaluated more positively when non-verbal behaviors such as hand and arm gestures are displayed along with speech, even if they do not semantically match the spoken utterance.}},
  author       = {{Salem, Maha and Kopp, Stefan and Wachsmuth, Ipke and Rohlfing, Katharina and Joublin, Frank}},
  issn         = {{1875-4805}},
  journal      = {{International Journal of Social Robotics, Special Issue on Expectations, Intentions, and Actions}},
  keywords     = {{Social Human-Robot Interaction, Multimodal Interaction and Conversational Skills, Robot Companions and Social Robots, Non-verbal Cues and Expressiveness}},
  number       = {{2}},
  pages        = {{201--217}},
  publisher    = {{Springer Science + Business Media}},
  title        = {{{Generation and evaluation of communicative robot gesture}}},
  doi          = {{10.1007/s12369-011-0124-9}},
  volume       = {{4}},
  year         = {{2012}},
}

@inproceedings{17228,
  abstract     = {{The paper investigates the effects of a robot’s “on-line” feedback during a tutoring situation with a human tutor. Analysis is based on a study conducted with an iCub robot that autonomously generates its feedback (gaze, pointing gesture) based on the system’s perception of the tutor’s actions using the idea of reciprocity of actions. Sequential micro-analysis of two opposite cases reveals how the robot’s behavior (responsive vs. non-responsive) pro-actively shapes the tutor’s conduct and thus co-produces the way in which it is being tutored. A dialogic and a monologic tutoring style are distinguished. The first 20 seconds of an encounter are found to shape the user’s perception and expectations of the system’s competences and lead to a relatively stable tutoring style even if the robot’s reactivity and appropriateness of feedback changes.}},
  author       = {{Pitsch, Karola and Lohan, Katrin Solveig and Rohlfing, Katharina and Saunders, Joe and Nehaniv, Chrystopher L. and Wrede, Britta}},
  booktitle    = {{Proceedings of the 21st IEEE International Symposium on Robot and Human Interactive Communication (RO-MAN 2012)}},
  pages        = {{974--981}},
  publisher    = {{IEEE}},
  title        = {{{Better be reactive at the beginning. Implications of the first seconds of an encounter for the tutoring style in human-robot-interaction}}},
  year         = {{2012}},
}

@inbook{17224,
  author       = {{Grimminger, Angela and Rohlfing, Katharina and Stenneken, Prisca}},
  booktitle    = {{Gesture and Multimodal Development}},
  editor       = {{Colletta, Jean-Marc and Guidetti, Michèle}},
  isbn         = {{9789027202581}},
  pages        = {{129--155}},
  publisher    = {{John Benjamins Publishing Company}},
  title        = {{{Children's lexical skills and task demands affect gestural behavior in mothers of late-talking children and children with typical language development [Reprint]}}},
  volume       = {{39}},
  year         = {{2012}},
}

@inproceedings{17226,
  author       = {{Szufnarowska, Joanna and Nomikou, Iris and Rohlfing, Katharina}},
  title        = {{{Educating each other's attention: Mothers' and infants' eye-contact within natural interactions at 3 and 6 months of age}}},
  year         = {{2012}},
}

@inproceedings{17227,
  author       = {{Nomikou, Iris and Rohlfing, Katharina}},
  title        = {{{Adapting the input collaboratively: the development of multimodal practices in early mother-infant interactions}}},
  year         = {{2012}},
}

@inproceedings{17223,
  author       = {{Nomikou, Iris and Lohan, Katrin Solveig and Rohlfing, Katharina}},
  title        = {{{Adaptive maternal synchrony: multimodal practices are tailored to infants' attention}}},
  year         = {{2012}},
}

@inproceedings{17229,
  author       = {{Nomikou, Iris and Lohan, Katrin Solveig and Rohlfing, Katharina}},
  title        = {{{Infants' gaze modulates maternal multimodal input: A study with 3-month-olds}}},
  year         = {{2012}},
}

@comment{NOTE(review): entry 17428 below is an exact duplicate of entry 17225
  above (same DOI 10.1007/s12369-011-0124-9, same authors/pages/year);
  keep one key and remove the other to avoid duplicate-entry warnings.}
@article{17428,
  abstract     = {{How is communicative gesture behavior in robots perceived by humans? Although gesture is crucial in social interaction, this research question is still largely unexplored in the field of social robotics. Thus, the main objective of the present work is to investigate how gestural machine behaviors can be used to design more natural communication in social robots. The chosen approach is twofold. Firstly, the technical challenges encountered when implementing a speech-gesture generation model on a robotic platform are tackled. We present a framework that enables the humanoid robot to flexibly produce synthetic speech and co-verbal hand and arm gestures at run-time, while not being limited to a predefined repertoire of motor actions. Secondly, the achieved flexibility in robot gesture is exploited in controlled experiments. To gain a deeper understanding of how communicative robot gesture might impact and shape human perception and evaluation of human-robot interaction, we conducted a between-subjects experimental study using the humanoid robot in a joint task scenario. We manipulated the non-verbal behaviors of the robot in three experimental conditions, so that it would refer to objects by utilizing either (1) unimodal (i.e., speech only) utterances, (2) congruent multimodal (i.e., semantically matching speech and gesture) or (3) incongruent multimodal (i.e., semantically non-matching speech and gesture) utterances. Our findings reveal that the robot is evaluated more positively when non-verbal behaviors such as hand and arm gestures are displayed along with speech, even if they do not semantically match the spoken utterance.}},
  author       = {{Salem, Maha and Kopp, Stefan and Wachsmuth, Ipke and Rohlfing, Katharina and Joublin, Frank}},
  issn         = {{1875-4805}},
  journal      = {{International Journal of Social Robotics, Special Issue on Expectations, Intentions, and Actions}},
  keywords     = {{Social Human-Robot Interaction, Multimodal Interaction and Conversational Skills, Robot Companions and Social Robots, Non-verbal Cues and Expressiveness}},
  number       = {{2}},
  pages        = {{201--217}},
  publisher    = {{Springer Science + Business Media}},
  title        = {{{Generation and evaluation of communicative robot gesture}}},
  doi          = {{10.1007/s12369-011-0124-9}},
  volume       = {{4}},
  year         = {{2012}},
}

@article{17220,
  abstract     = {{Pointing, like eye gaze, is a deictic gesture that can be used to orient the attention of another person towards an object or an event. Previous research suggests that infants first begin to follow a pointing gesture between 10 and 13 months of age. We investigated whether sensitivity to pointing could be seen at younger ages employing a technique recently used to show early sensitivity to perceived eye gaze. Three experiments were conducted with 4.5- and 6.5-month-old infants. Our first goal was to examine whether these infants could show a systematic response to pointing by shifting their visual attention in the direction of a pointing gesture when we eliminated the difficulty of disengaging fixation from a pointing hand. The results from Experiments 1 and 2 suggest that a dynamic, but not a static, pointing gesture triggers shifts of visual attention in infants as young as 4.5 months of age. Our second goal was to clarify whether this response was based on sensitivity to the directional posture of the pointing hand, the motion of the pointing hand, or both. The results from Experiment 3 suggest that the direction of motion is necessary but not sufficient to orient infants’ attention toward a distal target. Infants shifted their attention in the direction of the pointing finger, but only when the hand was moving in the same direction. These results suggest that infants are prepared to orient to the distal referent of a pointing gesture which likely contributes to their learning the communicative function of pointing.}},
  author       = {{Rohlfing, Katharina and Longo, Matthew R. and Bertenthal, Bennett I.}},
  issn         = {{1363-755X}},
  journal      = {{Developmental Science}},
  number       = {{3}},
  pages        = {{426--435}},
  publisher    = {{Wiley-Blackwell}},
  title        = {{{Dynamic pointing triggers shifts of visual attention in young infants}}},
  doi          = {{10.1111/j.1467-7687.2012.01139.x}},
  volume       = {{15}},
  year         = {{2012}},
}

@inproceedings{17217,
  abstract     = {{We designed a laboratory study to investigate the influence of social interaction on category learning. The objective in the present study is to examine what kind of teaching behavior can improve an agent’s learning of categories. In a computer-based study participants learned four categories for sixteen objects which appear on a computer screen. The objects’ categories determine what kind of manipulation is to be done on the objects. Five tutors and twenty participants were recruited to participate. For the study the tutors were placed in front of a computer in one room whereas the learners were in another room. The learners’ task was to manipulate the objects appropriately through the instructions they received from the tutor on their screens via six symbols. These six symbols were the only way for the tutor to communicate with the learner. We call this a bottom-up learning as it relies entirely on the perception of the tutors’ symbols without any prior knowledge of their meaning. The focus in the present study is not on the ability by the learner to acquire knowledge of the categories but on the types of instructions that the tutor gave during the trials and the effects of the feedback given to the learner. Therefore, the feedback given by the tutors via the symbols was classified and quantified.}},
  author       = {{Griffiths, Sascha and Nolfi, S. and Morlino, G. and Schillingmann, Lars and Kühnel, Sina and Rohlfing, Katharina and Wrede, Britta}},
  booktitle    = {{ICDL-EpiRob 2012}},
  title        = {{{Bottom-Up Learning of Feedback in a Categorization Task}}},
  year         = {{2012}},
}

@article{17216,
  author       = {{Lohan, Katrin Solveig and Rohlfing, Katharina and Pitsch, Karola and Saunders, Joe and Lehmann, Hagen and Nehaniv, Chrystopher L. and Fischer, Kerstin and Wrede, Britta}},
  issn         = {{1875-4805}},
  journal      = {{International Journal of Social Robotics}},
  keywords     = {{Social Robots, Contingency}},
  number       = {{2}},
  pages        = {{131--146}},
  publisher    = {{Springer Science + Business Media}},
  title        = {{{Tutor spotter: Proposing a feature set and evaluating it in a robotic system}}},
  doi          = {{10.1007/s12369-011-0125-8}},
  volume       = {{4}},
  year         = {{2012}},
}

