@article{17236,
  author       = {Narayan, Vikram and Lohan, Katrin Solveig and Tscherepanow, Marko and Rohlfing, Katharina and Wrede, Britta},
  title        = {Can State-of-the-Art Saliency Systems Model Infant Gazing Behavior in Tutoring Situations?},
  journal      = {Frontiers in Computational Neuroscience},
  year         = {2011},
  volume       = {5},
  number       = {35},
  publisher    = {Frontiers Media SA},
  issn         = {1662-5188},
  doi          = {10.3389/conf.fncom.2011.52.00035},
  keywords     = {child gazing behavior, computer vision, saliency, development},
  abstract     = {The behavior for a humanoid robot is often modeled in accordance with human behavior. Current research suggests that analyzing infant behavior as a basis for designing the robot behavior can guide us to a natural robot interface. Based on this idea many researchers support saliency systems as a bottom-up inspired way to simulate infant-like gazing behavior. In the field of saliency systems many different approaches have proposed and quantified in terms of speed, quality and other technical issues. But so far, no one compared and quantified them in terms of natural infant tutor interaction. The question we would like to address in this paper is: Can state-of-the-art saliency systems model infant gazing behavior in tutoring situations? By addressing these issues we want to take a step towards an autonomous robot system, which could be used more natural interaction experiments in future.},
}

