@article{17556,
  abstract     = {{Applying an eye-tracking technique, we tested early verb understanding in 48 infants aged 9 and 10months. Infants saw two objects presented side by side and heard a verb that referred to a common action with one of these objects (e.g., eating relating to a banana). The verbs were spoken by the parent in an interrogative manner in order to elicit a looking behavior in the infant. Results showed that 9-month-old infants did not show recognition of our test words. However, 10-month-old infants were able to understand a number of the tested verbs. In the discussion, we relate our findings to the nature of early verb representations.}},
  author       = {{Nomikou, Iris and Rohlfing, Katharina and Cimiano, Philipp and Mandler, Jean M.}},
  issn         = {{1547-3341}},
  journal      = {{Language Learning and Development}},
  number       = {{1}},
  pages        = {{64--74}},
  publisher    = {{Routledge}},
  title        = {{{Evidence for Early Comprehension of Action Verbs}}},
  doi          = {{10.1080/15475441.2018.1520639}},
  volume       = {{15}},
  year         = {{2019}},
  internal-note = {{NOTE(review): duplicate of entry 17178 (identical) and entry 20225 (same work, online-first pagination) -- keep one key and drop the others; journal/publisher names normalized from the indexer's ALL-CAPS form}},
}

@article{20243,
  author       = {{Rohlfing, Katharina and Leonardi, Giuseppe and Nomikou, Iris and Rączaszek-Leonardi, Joanna and Hüllermeier, Eyke}},
  journal      = {{IEEE Transactions on Cognitive and Developmental Systems}},
  title        = {{{Multimodal Turn-Taking: Motivations, Methodological Challenges, and Novel Approaches}}},
  doi          = {{10.1109/TCDS.2019.2892991}},
  year         = {{2019}},
  internal-note = {{NOTE(review): volume/number/pages missing -- entry was presumably exported while the article was early-access; complete from the DOI landing page}},
}

@article{20242,
  author       = {{Siebert, Scarlet and Tolksdorf, Nils Frederik and Rohlfing, Katharina and Zorn, Isabel}},
  journal      = {{The Journal of Communication and Media Studies}},
  number       = {{4}},
  pages        = {{21--35}},
  title        = {{{Raising Robotic Natives?: Persuasive Potentials of Social Robots in Early Education}}},
  doi          = {{10.18848/2470-9247/CGP/v04i04/21-35}},
  volume       = {{4}},
  year         = {{2019}},
  internal-note = {{NOTE(review): duplicate of entry 24905 (same title/pages/DOI; DOIs are case-insensitive) -- keep one key; volume/number taken from DOI segment v04i04, confirm against the journal; trailing space removed from journal name}},
}

@article{17178,
  abstract     = {{Applying an eye-tracking technique, we tested early verb understanding in 48 infants aged 9 and 10months. Infants saw two objects presented side by side and heard a verb that referred to a common action with one of these objects (e.g., eating relating to a banana). The verbs were spoken by the parent in an interrogative manner in order to elicit a looking behavior in the infant. Results showed that 9-month-old infants did not show recognition of our test words. However, 10-month-old infants were able to understand a number of the tested verbs. In the discussion, we relate our findings to the nature of early verb representations.}},
  author       = {{Nomikou, Iris and Rohlfing, Katharina and Cimiano, Philipp and Mandler, Jean M.}},
  issn         = {{1547-3341}},
  journal      = {{Language Learning and Development}},
  number       = {{1}},
  pages        = {{64--74}},
  publisher    = {{Routledge}},
  title        = {{{Evidence for Early Comprehension of Action Verbs}}},
  doi          = {{10.1080/15475441.2018.1520639}},
  volume       = {{15}},
  year         = {{2019}},
  internal-note = {{NOTE(review): byte-identical duplicate of entry 17556 (and same work as 20225) -- keep one key and drop the others; journal/publisher names normalized from the indexer's ALL-CAPS form}},
}

@article{24905,
  author       = {{Siebert, Scarlet and Tolksdorf, Nils Frederik and Rohlfing, Katharina J. and Zorn, Isabel}},
  issn         = {{2470-9247}},
  journal      = {{The Journal of Communication and Media Studies}},
  number       = {{4}},
  pages        = {{21--35}},
  title        = {{{Raising Robotic Natives?: Persuasive Potentials of Social Robots in Early Education}}},
  doi          = {{10.18848/2470-9247/cgp/v04i04/21-35}},
  volume       = {{4}},
  year         = {{2019}},
  internal-note = {{NOTE(review): duplicate of entry 20242 (same title/pages/DOI) -- this copy has the ISSN and the fuller author form "Katharina J.", so prefer it; volume/number taken from DOI segment v04i04, confirm against the journal}},
}

@inproceedings{24923,
  author       = {{Tolksdorf, Nils Frederik and Rohlfing, Katharina J.}},
  location     = {{Manchester, UK}},
  title        = {{{Reconceptualising early childhood literacy facing child-robot interaction}}},
  year         = {{2019}},
  internal-note = {{NOTE(review): required booktitle (proceedings/conference name) is missing -- BibTeX will warn and render no venue; also, location here appears to be the conference venue, not the publisher's city -- confirm and complete from the original programme}},
}

@article{28981,
  abstract     = {{What does it mean to explain data patterns? Cognitive psychologists and other scientists face this question when observable phenomena have to be explained in theoretical terms. Frequentist null-hypothesis testing – one prominent approach in psychology – controls error rates. Machine learning – an alternative prominent outside of, but not yet inside psychology – focuses on precise predictions. However, both alternatives often provide little insight into the data. We propose a combination of formal modeling and Bayesian statistical inference to ground explanations in data analysis. We support this approach by reference to philosophy of science and discussions of the current methods crisis in several empirical sciences and illustrate it with an example from visual attention research.}},
  author       = {{Krüger, Alexander and Tünnermann, Jan and Rohlfing, Katharina and Scharlau, Ingrid}},
  journal      = {{Archives of Data Science, Series A}},
  pages        = {{1--27}},
  title        = {{{Quantitative Explanation as a Tight Coupling of Data, Model, and Theory}}},
  doi          = {{10.5445/KSP/1000087327/10}},
  volume       = {{5}},
  year         = {{2018}},
}

@inproceedings{17557,
  abstract     = {{Previous work by [1] studied gesture-speech interaction in adults. [1] focussed on temporal and semantic coordination of gesture and speech and found that while adult speech is mostly coordinated (or redundant) with gestures, semantic coordination increases the temporal synchrony. These observations do not necessarily hold for children (in particular with respect to iconic gestures, see [2]), where the speech and gesture systems are still under development. We studied the semantic and temporal coordination of speech and gesture in 4-year old children using a corpus of 40 children producing action descriptions in task oriented dialogues. In particular, we examined what kinds of information are transmitted verbally vs. non-verbally and how they are related. To account for this, we extended the semantic features (SFs) developed in [3] for object descriptions in order to include the semantics of actions. We coded the SFs on the children’s speech and gestures separately using video data. In our presentation, we will focus on the quantitative distribution of SFs across gesture and speech. Our results indicate that speech and gestures of 4-year olds are less integrated than those of the adults, although there is a large variability among the children. We will discuss the results with respect to the cognitive processes (e.g., visual memory, language) underlying children’s abilities at this stage of development. Our work paves the way for the cognitive architecture of speech-gesture interaction in preschoolers which to our knowledge is missing so far.}},
  author       = {{Abramov, Olga and Kopp, Stefan and Nemeth, Anne and Kern, Friederike and Mertens, Ulrich and Rohlfing, Katharina}},
  booktitle    = {{KOGWIS2018: Computational Approaches to Cognitive Science}},
  keywords     = {{Speech-gesture integration, semantic features}},
  title        = {{{Towards a Computational Model of Child Gesture-Speech Production}}},
  year         = {{2018}},
  internal-note = {{NOTE(review): byte-identical duplicate of entry 17179 -- keep one key and drop the other; trailing whitespace trimmed from abstract}},
}

@inproceedings{24924,
  author       = {{Tolksdorf, Nils Frederik and Mertens, Ulrich J. and Rohlfing, Katharina J.}},
  location     = {{Istanbul, Turkey}},
  title        = {{{Multimodal response behavior of children during word learning with a robot}}},
  year         = {{2018}},
  internal-note = {{NOTE(review): required booktitle (proceedings/conference name) is missing -- BibTeX will warn and render no venue; location appears to be the conference venue, not the publisher's city -- confirm and complete}},
}

@article{20227,
  author       = {{Rączaszek-Leonardi, Joanna and Nomikou, Iris and Rohlfing, Katharina and Deacon, Terrence W.}},
  journal      = {{Ecological Psychology}},
  pages        = {{39--73}},
  title        = {{{Language development from an ecological perspective: Ecologically valid ways to abstract symbols.}}},
  volume       = {{30}},
  year         = {{2018}},
}

@article{20225,
  author       = {{Nomikou, Iris and Rohlfing, Katharina and Cimiano, Philipp and Mandler, Jean M.}},
  journal      = {{Language Learning and Development}},
  pages        = {{1--11}},
  title        = {{{Evidence for early comprehension of action verbs.}}},
  year         = {{2018}},
  internal-note = {{NOTE(review): same work as entries 17556/17178 (online-first pagination 1--11 vs. final 15(1):64--74, DOI 10.1080/15475441.2018.1520639) -- prefer the final published record and drop this one; doubled/trailing spaces trimmed from author field}},
}

@inproceedings{17179,
  abstract     = {{Previous work by [1] studied gesture-speech interaction in adults. [1] focussed on temporal and semantic coordination of gesture and speech and found that while adult speech is mostly coordinated (or redundant) with gestures, semantic coordination increases the temporal synchrony. These observations do not necessarily hold for children (in particular with respect to iconic gestures, see [2]), where the speech and gesture systems are still under development. We studied the semantic and temporal coordination of speech and gesture in 4-year old children using a corpus of 40 children producing action descriptions in task oriented dialogues. In particular, we examined what kinds of information are transmitted verbally vs. non-verbally and how they are related. To account for this, we extended the semantic features (SFs) developed in [3] for object descriptions in order to include the semantics of actions. We coded the SFs on the children’s speech and gestures separately using video data. In our presentation, we will focus on the quantitative distribution of SFs across gesture and speech. Our results indicate that speech and gestures of 4-year olds are less integrated than those of the adults, although there is a large variability among the children. We will discuss the results with respect to the cognitive processes (e.g., visual memory, language) underlying children’s abilities at this stage of development. Our work paves the way for the cognitive architecture of speech-gesture interaction in preschoolers which to our knowledge is missing so far.}},
  author       = {{Abramov, Olga and Kopp, Stefan and Nemeth, Anne and Kern, Friederike and Mertens, Ulrich and Rohlfing, Katharina}},
  booktitle    = {{KOGWIS2018: Computational Approaches to Cognitive Science}},
  keywords     = {{Speech-gesture integration, semantic features}},
  title        = {{{Towards a Computational Model of Child Gesture-Speech Production}}},
  year         = {{2018}},
  internal-note = {{NOTE(review): byte-identical duplicate of entry 17557 -- keep one key and drop the other; trailing whitespace trimmed from abstract}},
}

@inproceedings{24926,
  author       = {{Mertens, Ulrich J. and Bergmann, Kirsten and Tolksdorf, Nils Frederik and Rohlfing, Katharina J.}},
  location     = {{Cape Town, South Africa}},
  title        = {{{Can the reduction of an iconic gesture aid long-term learning? A pilot child-robot-study}}},
  year         = {{2018}},
  internal-note = {{NOTE(review): required booktitle (proceedings/conference name) is missing -- BibTeX will warn and render no venue; location appears to be the conference venue, not the publisher's city -- confirm and complete}},
}

@article{20250,
  author       = {{Gaspers, Judith and Cimiano, Philipp and Rohlfing, Katharina and Wrede, Britta}},
  journal      = {{IEEE Transactions on Cognitive and Developmental Systems}},
  pages        = {{183--196}},
  title        = {{{Constructing a Language From Scratch: Combining Bottom–Up and Top–Down Learning Processes in a Computational Model of Language Acquisition}}},
  doi          = {{10.1109/TCDS.2016.2614958}},
  volume       = {{9}},
  year         = {{2017}},
  internal-note = {{NOTE(review): issue number not recorded -- complete from the DOI landing page if available}},
}

@article{20245,
  author       = {{Nomikou, Iris and Leonardi, Giuseppe and Radkowska, Alicja and Rączaszek-Leonardi, Joanna and Rohlfing, Katharina}},
  issn         = {{1664-1078}},
  journal      = {{Frontiers in Psychology}},
  title        = {{{Taking Up an Active Role: Emerging Participation in Early Mother–Infant Interaction during Peekaboo Routines}}},
  doi          = {{10.3389/fpsyg.2017.01656}},
  volume       = {{8}},
  year         = {{2017}},
  internal-note = {{NOTE(review): volume 8 inferred for consistency with entry 20228 (same journal, same year 2017) -- confirm against the DOI landing page}},
}

@article{20246,
  author       = {{Nomikou, Iris and Koke, Monique and Rohlfing, Katharina}},
  journal      = {{Brain Sciences}},
  number       = {{5}},
  title        = {{{Verbs in mothers’ input to six-month-olds: Synchrony between presentation, meaning, and actions Is related to later verb acquisition.}}},
  doi          = {{10.3390/brainsci7050052}},
  volume       = {{7}},
  year         = {{2017}},
  internal-note = {{NOTE(review): volume/number taken from MDPI DOI pattern brainsci7050052 (journal vol 7, issue 05, article 52) -- confirm; trailing space removed from journal name}},
}

@inproceedings{20204,
  author       = {{Grimminger, Angela and Rohlfing, Katharina}},
  booktitle    = {{Proceedings of the 6th International Workshop on Child Computer Interaction (WOCCI 2017)}},
  pages        = {{28--33}},
  title        = {{{“Can you teach me?” – Children teaching new words to a robot in a book reading scenario.}}},
  year         = {{2017}},
  internal-note = {{NOTE(review): was @article with the proceedings title in the journal field -- converted to @inproceedings/booktitle; citation key unchanged}},
}

@article{17875,
  abstract     = {{Early identification of primary language delay is crucial to implement effective prevention programs. Available screening instruments are based on parents' reports and have only insufficient predictive validity. This study employed observational measures of preverbal infants' gestural communication to test its predictive validity for identifying later language delays. Pointing behavior of fifty-nine 12-month-old infants was analyzed and related to their language skills 1 year later. Results confirm predictive validity of preverbal communication for language skills with the hand shape of pointing being superior compared to the underlying motives for pointing (imperative vs. declarative). Twelve-month-olds who pointed only with their open hand but never with their index finger were at risk for primary language delay at 2 years of age.}},
  author       = {{Lüke, Carina and Grimminger, Angela and Rohlfing, Katharina and Liszkowski, Ulf and Ritterfeld, Ute}},
  issn         = {{1467-8624}},
  journal      = {{Child Development}},
  number       = {{2}},
  pages        = {{484--492}},
  publisher    = {{Wiley}},
  title        = {{{In Infants' Hands: Identification of Preverbal Infants at Risk for Primary Language Delay}}},
  doi          = {{10.1111/cdev.12610}},
  volume       = {{88}},
  year         = {{2017}},
}

@article{20228,
  author       = {{Heller, Vivian and Rohlfing, Katharina}},
  journal      = {{Frontiers in Psychology}},
  title        = {{{Reference as an Interactive Achievement: Sequential and Longitudinal Analyses of Labeling Interactions in Shared Book Reading and Free Play}}},
  volume       = {{8}},
  year         = {{2017}},
  internal-note = {{NOTE(review): DOI and article number missing -- Frontiers articles always carry a DOI (10.3389/fpsyg.2017.*); complete from the journal site}},
}

@article{20229,
  author       = {{Rohlfing, Katharina and Ceurremans, Josefa and Horst, Jessica S.}},
  journal      = {{Communication Disorders Quarterly}},
  title        = {{{Benefits of repeated book readings in children with SLI}}},
  year         = {{2017}},
  internal-note = {{NOTE(review): volume/number/pages/DOI missing -- complete from the publisher; trailing space removed from author field}},
}

