@inproceedings{17282,
  abstract     = {In recent years industrial robots have been successfully established because they fulfil meaningful tasks in production. In contrast the question of applications for social robots is still open. For quite some time they have only been used in research or at best as simple toys by real users in everyday life situations. However, we suggest that there are still unknown application fields that are suitable for existing robots. Therefore, our approach is to show short movies and descriptions of real robots to participants and ask whether there are any specific tasks these robots could perform in the naive users' everyday life. The systems' appearance and abilities strongly influence the user's expectations, that's why we suppose that we will find strong differences between zoomorphic robots like AIBO and iCat and other robots like BIRON (functional design) and BARTHOC (humanoid). We have conducted an online study with more than 100 participants to test this hypothesis.},
  author       = {Lohse, Manja and Hegel, Frank and Swadzba, Agnes and Rohlfing, Katharina and Wachsmuth, Sven and Wrede, Britta},
  booktitle    = {Workshop on The Reign of Catz and Dogz? The role of virtual creatures in a computerised society},
  pages        = {121--126},
  title        = {What can {I} do for you? Appearance and Application of Robots},
  year         = {2007},
}

@inproceedings{17283,
  abstract     = {This paper presents a new insight from a computational analysis of parental actions. Developmental behavioral studies have suggested that parental modifications in their actions directed to infants versus to adults may aid the infants' processing of the actions. We have been analyzing parental actions using a bottom-up attention model so as to take the advantage in robot action learning. Our latest result indicates that parental social signals can be used for a robot to detect significant state changes in the demonstrated action.},
  author       = {Nagai, Yukie and Rohlfing, Katharina},
  booktitle    = {The 7th International Conference on Epigenetic Robotics},
  title        = {Parental Signal Indicating Significant State Change in Action Demonstration},
  year         = {2007},
}

@inproceedings{17281,
  author       = {Rohlfing, Katharina and Kopp, Stefan},
  booktitle    = {Proc. Symposium on Language \& Robots},
  pages        = {79--82},
  title        = {Meaning in the timing? The emergence of complex pointing patterns},
  year         = {2007},
}

@article{20232,
  author        = {Rohlfing, Katharina and Fritsch, Jannik and Wrede, Britta and Jungmann, Tanja},
  journal       = {Advanced Robotics},
  pages         = {1183--1199},
  title         = {How can multimodal cues from child-directed interaction reduce learning complexity in robots?},
  doi           = {10.1163/156855306778522532},
  volume        = {20},
  year          = {2006},
  internal-note = {Was typed @inbook with the journal name in booktitle; converted to @article. DOI, pages and volume are identical to entry 17289 -- likely a duplicate; consider merging the two keys.},
}

@article{17290,
  abstract     = {Forty Polish children aged between 1;8 and 2;3 were trained over two training sessions in the comprehension of the preposition POD [UNDER]. In the experiment, two variables were manipulated: a within-subjects variable of SITUATION and a between-subjects variable of LINGUISTIC INPUT. The effect of situation could be found in all trained groups, showing that children's understanding in familiar and transfer situations differed significantly from the performance of the control group in the post-test. The results further suggest that depending on the type of a situation, the linguistic task requires different word learning abilities from a child.},
  author       = {Rohlfing, Katharina},
  issn         = {1469-7602},
  journal      = {Journal of Child Language},
  number       = {1},
  pages        = {51--69},
  publisher    = {Cambridge University Press},
  title        = {Facilitating the acquisition of {UNDER} by means of {IN} and {ON} - a training study in {Polish}},
  doi          = {10.1017/S0305000905007257},
  volume       = {33},
  year         = {2006},
}

@article{17289,
  abstract     = {Robots have to deal with an enormous amount of sensory stimuli. One solution in making sense of them is to enable a robot system to actively search for cues that help structuring the information. Studies with infants reveal that parents support the learning-process by modifying their interaction style, dependent on their child's developmental age. In our study, in which parents demonstrated everyday actions to their preverbal children (8-11 months old), our aim was to identify objective parameters for multimodal action modification. Our results reveal two action parameters being modified in adult-child interaction: roundness and pace. Furthermore, we found that language has the power to help children structuring action sequences by synchrony and emphasis. These insights are discussed with respect to the built-in attention architecture of a socially interactive robot, which enables it to understand demonstrated actions. Our algorithmic approach towards automatically detecting the task structure in child-designed input demonstrates the potential impact of insights from developmental learning on robotics. The presented findings pave the way to automatically detect when to imitate in a demonstration.},
  author       = {Rohlfing, Katharina and Fritsch, Jannik and Wrede, Britta and Jungmann, Tanja},
  issn         = {1568-5535},
  journal      = {Advanced Robotics},
  keywords     = {multi-modal motherese, child-directed input, motionese, learning mechanisms},
  number       = {10},
  pages        = {1183--1199},
  publisher    = {VSP BV},
  title        = {How can multimodal cues from child-directed interaction reduce learning complexity in robots?},
  doi          = {10.1163/156855306778522532},
  volume       = {20},
  year         = {2006},
}

@inproceedings{17286,
  author       = {Wrede, Britta and Rohlfing, Katharina and Nagai, Yukie},
  booktitle    = {5th Biennial International Conference on Infant Studies ({ICIS} 2006)},
  title        = {How to make sense of environmental interaction and dynamics. Symposium: Models of infant development: Are we really serious about environmental interaction and dynamics?},
  year         = {2006},
}

@article{17288,
  author       = {Rohlfing, Katharina and Loehr, D. and Duncan, S. and Brown, A. and Franklin, A. and Kimbara, I. and Milde, J.-T. and Parrill, F. and Rose, T. and Schmidt, T. and Sloetjes, H. and Thies, A. and Wellinghoff, S.},
  journal      = {Gespr{\"a}chsforschung},
  title        = {Comparison of multimodal annotation tools: Workshop report},
  volume       = {7},
  year         = {2006},
}

@inproceedings{17287,
  author       = {Muhl, Claudia and Wrede, Britta and Rohlfing, Katharina},
  booktitle    = {Proceedings of the Workshop on How people talk to computers, robots, and other artificial communication partners},
  editor       = {Fischer, Kerstin},
  title        = {Factors influencing feedback},
  year         = {2006},
}

@article{17294,
  author       = {Rohlfing, Katharina},
  issn         = {1940-7750},
  journal      = {Perspectives on Language Learning and Education},
  number       = {3},
  pages        = {13--17},
  publisher    = {American Speech-Language-Hearing Association},
  title        = {Learning prepositions},
  doi          = {10.1044/lle12.3.13},
  volume       = {12},
  year         = {2005},
}

@inproceedings{17295,
  author       = {Fritsch, Jannik and Hofemann, Nils and Rohlfing, Katharina},
  booktitle    = {Proc. {IEEE} {ICRA}},
  publisher    = {IEEE},
  title        = {Detecting {`When to Imitate'} in a Social Context with a Human Caregiver},
  year         = {2005},
}

@inproceedings{17293,
  author       = {Loehr, D. and Duncan, S. and Rohlfing, Katharina},
  booktitle    = {Symposium at the Congress of International Society for Gesture Studies Interacting Bodies 2005},
  title        = {How analysis shapes phenomena},
  year         = {2005},
}

@inproceedings{17292,
  author       = {Rohlfing, Katharina},
  booktitle    = {Proceedings of the 5th Workshop on Language and Space (Workshop on Spatial Language and Dialogue)},
  title        = {Pointing to spatial relations in mother-child dialogue},
  year         = {2005},
}

@inproceedings{17291,
  author       = {Rohlfing, Katharina and Brand, R. J. and Gogate, L. J.},
  booktitle    = {Symposium at the X. International Congress for Studies in Child Language {IASCL} 2005},
  title        = {Multimodal {Motherese}},
  year         = {2005},
}

@inproceedings{17296,
  abstract      = {One dream of robotics research is to build robot companions that can interact outside the lab in real world environments such as private homes. There has been good progress on many components needed for such a robot companion, but only few systems are documented in the literature that actually integrate a larger number of components leading to a more natural and human-like interaction with such a robot. However, only the integration of many components on the same robot allows us to study embodied interaction and leads to new insights on how to improve the overall appearance of such a robot companion. Towards this end, we present the Bielefeld Robot Companion BIRON as an integration platform for studying embodied interaction. Reporting different stages of the alternating development and evaluation process, we argue that an integrated and actually running system is necessary to assess human needs and demands under real life conditions and to determine what functions are still missing. This interplay between evaluation and development stimulates the development process as well as the design of appropriate evaluation metrics. Moreover, such constant evaluations of the system help identify problematic aspects that need to be solved before sophisticated robot companions can be successfully evaluated in long-term user studies.},
  author        = {Rohlfing, Katharina and Fritsch, Jannik and Wrede, Britta},
  booktitle     = {Third International Conference on Development and Learning ({ICDL} 2004)},
  isbn          = {0-615-12704-5},
  pages         = {27},
  title         = {Learning to Manipulate Objects: A Quantitative Evaluation of {Motionese}},
  year          = {2004},
  internal-note = {NOTE(review): abstract appears to describe a different paper (the BIRON robot companion), not the Motionese evaluation named in the title -- verify against the published ICDL 2004 paper.},
}

@incollection{17297,
  author       = {Rohlfing, Katharina},
  booktitle    = {Spracherwerb und Konzeptualisierung},
  editor       = {Haberzettl, Stefanie and Wegener, Heide},
  isbn         = {3-631-39265-6},
  pages        = {35--48},
  publisher    = {Peter Lang},
  title        = {Situierte {Semantik}. {Die} {Rolle} der {Sprache} und nicht-verbaler {Strategien} beim {Erwerb} r{\"a}umlicher {Relationen}},
  year         = {2003},
}

@article{17298,
  author       = {Rohlfing, Katharina and Rehm, Matthias and Goecke, Karl Ulrich},
  issn         = {1567-7095},
  journal      = {Journal of Cognition and Culture},
  number       = {2},
  pages        = {132--157},
  publisher    = {Brill},
  title        = {Situatedness: The interplay between context(s) and situation},
  volume       = {3},
  year         = {2003},
}

@inproceedings{17558,
  abstract     = {The capability to coordinate and interrelate speech and vision is a virtual prerequisite for adaptive, cooperative, and flexible interaction among people. It is therefore to assume that human-machine interaction, too, would benefit from intelligent interfaces for integrated speech and image processing. In this paper, we first sketch an interactive system that integrates automatic speech processing with image understanding. Then, we concentrate on performance assessment which we believe is an emerging key issue in multimodal interaction. We explain the benefit of time scale analysis and usability studies and evaluate our system accordingly.},
  author       = {Bauckhage, Christian and Fritsch, Jannik and Rohlfing, Katharina and Wachsmuth, Sven and Sagerer, Gerhard},
  booktitle    = {Proc. {IEEE} International Conference on Multimodal Interfaces ({ICMI}'02)},
  pages        = {9--14},
  title        = {Evaluating Integrated Speech- and Image Understanding},
  year         = {2002},
}

@inproceedings{17299,
  abstract      = {The capability to coordinate and interrelate speech and vision is a virtual prerequisite for adaptive, cooperative, and flexible interaction among people. It is therefore to assume that human-machine interaction, too, would benefit from intelligent interfaces for integrated speech and image processing. In this paper, we first sketch an interactive system that integrates automatic speech processing with image understanding. Then, we concentrate on performance assessment which we believe is an emerging key issue in multimodal interaction. We explain the benefit of time scale analysis and usability studies and evaluate our system accordingly.},
  author        = {Bauckhage, Christian and Fritsch, Jannik and Rohlfing, Katharina and Wachsmuth, Sven and Sagerer, Gerhard},
  booktitle     = {Proc. {IEEE} International Conference on Multimodal Interfaces ({ICMI}'02)},
  pages         = {9--14},
  title         = {Evaluating Integrated Speech- and Image Understanding},
  year          = {2002},
  internal-note = {Exact duplicate of entry 17558 (same authors, title, venue, pages, year); kept because external documents may cite this key -- merge or remove once citations are migrated.},
}

