---
_id: '63053'
author:
- first_name: Carlos
  full_name: Hernández, Carlos
  last_name: Hernández
- first_name: Angel E.
  full_name: Rodriguez-Fernandez, Angel E.
  last_name: Rodriguez-Fernandez
- first_name: Lennart
  full_name: Schäpermeier, Lennart
  last_name: Schäpermeier
- first_name: Oliver
  full_name: Cuate, Oliver
  last_name: Cuate
- first_name: Heike
  full_name: Trautmann, Heike
  id: '100740'
  last_name: Trautmann
  orcid: 0000-0002-9788-8282
- first_name: Oliver
  full_name: Schütze, Oliver
  last_name: Schütze
citation:
  ama: Hernández C, Rodriguez-Fernandez AE, Schäpermeier L, Cuate O, Trautmann H,
    Schütze O. An Evolutionary Approach for the Computation of ∈-Locally Optimal Solutions
    for Multi-Objective Multimodal Optimization. <i>IEEE Transactions on Evolutionary
    Computation</i>. Published online 2025:1-1. doi:<a href="https://doi.org/10.1109/TEVC.2025.3637276">10.1109/TEVC.2025.3637276</a>
  apa: Hernández, C., Rodriguez-Fernandez, A. E., Schäpermeier, L., Cuate, O., Trautmann,
    H., &#38; Schütze, O. (2025). An Evolutionary Approach for the Computation of
    ∈-Locally Optimal Solutions for Multi-Objective Multimodal Optimization. <i>IEEE
    Transactions on Evolutionary Computation</i>, 1–1. <a href="https://doi.org/10.1109/TEVC.2025.3637276">https://doi.org/10.1109/TEVC.2025.3637276</a>
  bibtex: '@article{Hernández_Rodriguez-Fernandez_Schäpermeier_Cuate_Trautmann_Schütze_2025,
    title={An Evolutionary Approach for the Computation of ∈-Locally Optimal Solutions
    for Multi-Objective Multimodal Optimization}, DOI={<a href="https://doi.org/10.1109/TEVC.2025.3637276">10.1109/TEVC.2025.3637276</a>},
    journal={IEEE Transactions on Evolutionary Computation}, author={Hernández, Carlos
    and Rodriguez-Fernandez, Angel E. and Schäpermeier, Lennart and Cuate, Oliver
    and Trautmann, Heike and Schütze, Oliver}, year={2025}, pages={1–1} }'
  chicago: Hernández, Carlos, Angel E. Rodriguez-Fernandez, Lennart Schäpermeier,
    Oliver Cuate, Heike Trautmann, and Oliver Schütze. “An Evolutionary Approach for
    the Computation of ∈-Locally Optimal Solutions for Multi-Objective Multimodal
    Optimization.” <i>IEEE Transactions on Evolutionary Computation</i>, 2025, 1–1.
    <a href="https://doi.org/10.1109/TEVC.2025.3637276">https://doi.org/10.1109/TEVC.2025.3637276</a>.
  ieee: 'C. Hernández, A. E. Rodriguez-Fernandez, L. Schäpermeier, O. Cuate, H. Trautmann,
    and O. Schütze, “An Evolutionary Approach for the Computation of ∈-Locally Optimal
    Solutions for Multi-Objective Multimodal Optimization,” <i>IEEE Transactions on
    Evolutionary Computation</i>, pp. 1–1, 2025, doi: <a href="https://doi.org/10.1109/TEVC.2025.3637276">10.1109/TEVC.2025.3637276</a>.'
  mla: Hernández, Carlos, et al. “An Evolutionary Approach for the Computation of
    ∈-Locally Optimal Solutions for Multi-Objective Multimodal Optimization.” <i>IEEE
    Transactions on Evolutionary Computation</i>, 2025, pp. 1–1, doi:<a href="https://doi.org/10.1109/TEVC.2025.3637276">10.1109/TEVC.2025.3637276</a>.
  short: C. Hernández, A.E. Rodriguez-Fernandez, L. Schäpermeier, O. Cuate, H. Trautmann,
    O. Schütze, IEEE Transactions on Evolutionary Computation (2025) 1–1.
date_created: 2025-12-12T06:13:06Z
date_updated: 2025-12-12T06:13:51Z
department:
- _id: '819'
doi: 10.1109/TEVC.2025.3637276
keyword:
- Optimization
- Evolutionary computation
- Hands
- Proposals
- Convergence
- Computational efficiency
- Artificial intelligence
- Accuracy
- Approximation algorithms
- Aerospace electronics
- Multi-objective optimization
- evolutionary algorithms
- nearly optimal solutions
- multimodal optimization
- archiving
- continuation
language:
- iso: eng
page: 1-1
publication: IEEE Transactions on Evolutionary Computation
status: public
title: An Evolutionary Approach for the Computation of ∈-Locally Optimal Solutions
  for Multi-Objective Multimodal Optimization
type: journal_article
user_id: '15504'
year: '2025'
...
---
_id: '56221'
author:
- first_name: Angel E.
  full_name: Rodriguez-Fernandez, Angel E.
  last_name: Rodriguez-Fernandez
- first_name: Lennart
  full_name: Schäpermeier, Lennart
  last_name: Schäpermeier
- first_name: Carlos
  full_name: Hernández, Carlos
  last_name: Hernández
- first_name: Pascal
  full_name: Kerschke, Pascal
  last_name: Kerschke
- first_name: Heike
  full_name: Trautmann, Heike
  id: '100740'
  last_name: Trautmann
  orcid: 0000-0002-9788-8282
- first_name: Oliver
  full_name: Schütze, Oliver
  last_name: Schütze
citation:
  ama: Rodriguez-Fernandez AE, Schäpermeier L, Hernández C, Kerschke P, Trautmann
    H, Schütze O. Finding ϵ-Locally Optimal Solutions for Multi-Objective Multimodal
    Optimization. <i>IEEE Transactions on Evolutionary Computation</i>. Published
    online 2024:1-1. doi:<a href="https://doi.org/10.1109/TEVC.2024.3458855">10.1109/TEVC.2024.3458855</a>
  apa: Rodriguez-Fernandez, A. E., Schäpermeier, L., Hernández, C., Kerschke, P.,
    Trautmann, H., &#38; Schütze, O. (2024). Finding ϵ-Locally Optimal Solutions for
    Multi-Objective Multimodal Optimization. <i>IEEE Transactions on Evolutionary
    Computation</i>, 1–1. <a href="https://doi.org/10.1109/TEVC.2024.3458855">https://doi.org/10.1109/TEVC.2024.3458855</a>
  bibtex: '@article{Rodriguez-Fernandez_Schäpermeier_Hernández_Kerschke_Trautmann_Schütze_2024,
    title={Finding ϵ-Locally Optimal Solutions for Multi-Objective Multimodal Optimization},
    DOI={<a href="https://doi.org/10.1109/TEVC.2024.3458855">10.1109/TEVC.2024.3458855</a>},
    journal={IEEE Transactions on Evolutionary Computation}, author={Rodriguez-Fernandez,
    Angel E. and Schäpermeier, Lennart and Hernández, Carlos and Kerschke, Pascal
    and Trautmann, Heike and Schütze, Oliver}, year={2024}, pages={1–1} }'
  chicago: Rodriguez-Fernandez, Angel E., Lennart Schäpermeier, Carlos Hernández,
    Pascal Kerschke, Heike Trautmann, and Oliver Schütze. “Finding ϵ-Locally Optimal
    Solutions for Multi-Objective Multimodal Optimization.” <i>IEEE Transactions on
    Evolutionary Computation</i>, 2024, 1–1. <a href="https://doi.org/10.1109/TEVC.2024.3458855">https://doi.org/10.1109/TEVC.2024.3458855</a>.
  ieee: 'A. E. Rodriguez-Fernandez, L. Schäpermeier, C. Hernández, P. Kerschke, H.
    Trautmann, and O. Schütze, “Finding ϵ-Locally Optimal Solutions for Multi-Objective
    Multimodal Optimization,” <i>IEEE Transactions on Evolutionary Computation</i>,
    pp. 1–1, 2024, doi: <a href="https://doi.org/10.1109/TEVC.2024.3458855">10.1109/TEVC.2024.3458855</a>.'
  mla: Rodriguez-Fernandez, Angel E., et al. “Finding ϵ-Locally Optimal Solutions
    for Multi-Objective Multimodal Optimization.” <i>IEEE Transactions on Evolutionary
    Computation</i>, 2024, pp. 1–1, doi:<a href="https://doi.org/10.1109/TEVC.2024.3458855">10.1109/TEVC.2024.3458855</a>.
  short: A.E. Rodriguez-Fernandez, L. Schäpermeier, C. Hernández, P. Kerschke, H.
    Trautmann, O. Schütze, IEEE Transactions on Evolutionary Computation (2024) 1–1.
date_created: 2024-09-24T08:01:14Z
date_updated: 2024-09-24T08:01:47Z
doi: 10.1109/TEVC.2024.3458855
keyword:
- Optimization
- Evolutionary computation
- Approximation algorithms
- Benchmark testing
- Vectors
- Surveys
- Pareto optimization
- multi-objective optimization
- evolutionary computation
- multimodal optimization
- local solutions
language:
- iso: eng
page: 1-1
publication: IEEE Transactions on Evolutionary Computation
status: public
title: Finding ϵ-Locally Optimal Solutions for Multi-Objective Multimodal Optimization
type: journal_article
user_id: '15504'
year: '2024'
...
---
_id: '59888'
abstract:
- lang: eng
  text: "Everyday explanations are interactive processes with the aim to provide a
    less knowledgeable person with reasonable information about other people, objects,
    or events. Because explanations are interactive communicative processes, the topical
    structure of an explanation may vary dynamically depending on the immediate feedback
    of the explainee. In this paper, we analyse topical transitions in medical explanations
    organised by different physicians (explainers) related to different forms of multimodal
    behaviour of caregivers (explainees) attending an explanation about the procedures
    of an upcoming surgery of a child. The analyses reveal that explainees’ multimodal
    behaviour with gaze shifts (and particularly gaze aversion) can predict a transition
    from an elaborated topic to a new one, whereas explainees’ forms of multimodal
    behaviour with static gaze cannot be related to changes of the topical structure."
article_type: original
author:
- first_name: Stefan Teodorov
  full_name: Lazarov, Stefan Teodorov
  id: '90345'
  last_name: Lazarov
  orcid: 0009-0009-0892-9483
- first_name: Kai
  full_name: Biermeier, Kai
  id: '55908'
  last_name: Biermeier
  orcid: 0000-0002-2879-2359
- first_name: Angela
  full_name: Grimminger, Angela
  id: '57578'
  last_name: Grimminger
citation:
  ama: Lazarov ST, Biermeier K, Grimminger A. Changes in the topical structure of
    explanations are related to explainees’ multimodal behaviour. <i>Interaction Studies</i>.
    2024;25(3):257-280. doi:<a href="https://doi.org/10.1075/is.23033.laz">10.1075/is.23033.laz</a>
  apa: Lazarov, S. T., Biermeier, K., &#38; Grimminger, A. (2024). Changes in the
    topical structure of explanations are related to explainees’ multimodal behaviour.
    <i>Interaction Studies</i>, <i>25</i>(3), 257–280. <a href="https://doi.org/10.1075/is.23033.laz">https://doi.org/10.1075/is.23033.laz</a>
  bibtex: '@article{Lazarov_Biermeier_Grimminger_2024, title={Changes in the topical
    structure of explanations are related to explainees’ multimodal behaviour}, volume={25},
    DOI={<a href="https://doi.org/10.1075/is.23033.laz">10.1075/is.23033.laz</a>},
    number={3}, journal={Interaction Studies}, publisher={John Benjamins}, author={Lazarov,
    Stefan Teodorov and Biermeier, Kai and Grimminger, Angela}, year={2024}, pages={257–280}
    }'
  chicago: 'Lazarov, Stefan Teodorov, Kai Biermeier, and Angela Grimminger. “Changes
    in the Topical Structure of Explanations Are Related to Explainees’ Multimodal
    Behaviour.” <i>Interaction Studies</i> 25, no. 3 (2024): 257–80. <a href="https://doi.org/10.1075/is.23033.laz">https://doi.org/10.1075/is.23033.laz</a>.'
  ieee: 'S. T. Lazarov, K. Biermeier, and A. Grimminger, “Changes in the topical structure
    of explanations are related to explainees’ multimodal behaviour,” <i>Interaction
    Studies</i>, vol. 25, no. 3, pp. 257–280, 2024, doi: <a href="https://doi.org/10.1075/is.23033.laz">10.1075/is.23033.laz</a>.'
  mla: Lazarov, Stefan Teodorov, et al. “Changes in the Topical Structure of Explanations
    Are Related to Explainees’ Multimodal Behaviour.” <i>Interaction Studies</i>,
    vol. 25, no. 3, John Benjamins, 2024, pp. 257–80, doi:<a href="https://doi.org/10.1075/is.23033.laz">10.1075/is.23033.laz</a>.
  short: S.T. Lazarov, K. Biermeier, A. Grimminger, Interaction Studies 25 (2024)
    257–280.
date_created: 2025-05-14T06:25:13Z
date_updated: 2025-06-27T13:57:36Z
doi: 10.1075/is.23033.laz
intvolume: '25'
issue: '3'
keyword:
- explanations
- multimodal behaviour
- elaborations
- conditional probabilities
language:
- iso: eng
main_file_link:
- open_access: '1'
  url: https://www.jbe-platform.com/content/journals/10.1075/is.23033.laz#metrics_content
oa: '1'
page: 257-280
project:
- _id: '112'
  grant_number: '438445824'
  name: 'TRR 318 - A02: TRR 318 - Verstehensprozess einer Erklärung beobachten und
    auswerten (Teilprojekt A02)'
publication: Interaction Studies
publication_identifier:
  eissn:
  - 1572-0381
publication_status: published
publisher: John Benjamins
status: public
title: Changes in the topical structure of explanations are related to explainees’
  multimodal behaviour
type: journal_article
user_id: '90345'
volume: 25
year: '2024'
...
---
_id: '46318'
abstract:
- lang: eng
  text: 'Multi-objective (MO) optimization, i.e., the simultaneous optimization of
    multiple conflicting objectives, is gaining more and more attention in various
    research areas, such as evolutionary computation, machine learning (e.g., (hyper-)parameter
    optimization), or logistics (e.g., vehicle routing). Many works in this domain
    mention the structural problem property of multimodality as a challenge from two
    classical perspectives: (1) finding all globally optimal solution sets, and (2)
    avoiding to get trapped in local optima. Interestingly, these streams seem to
    transfer many traditional concepts of single-objective (SO) optimization into
    claims, assumptions, or even terminology regarding the MO domain, but mostly neglect
    the understanding of the structural properties as well as the algorithmic search
    behavior on a problem’s landscape. However, some recent works counteract this
    trend, by investigating the fundamentals and characteristics of MO problems using
    new visualization techniques and gaining surprising insights. Using these visual
    insights, this work proposes a step towards a unified terminology to capture multimodality
    and locality in a broader way than it is usually done. This enables us to investigate
    current research activities in multimodal continuous MO optimization and to highlight
    new implications and promising research directions for the design of benchmark
    suites, the discovery of MO landscape features, the development of new MO (or
    even SO) optimization algorithms, and performance indicators. For all these topics,
    we provide a review of ideas and methods but also an outlook on future challenges,
    research potential and perspectives that result from recent developments.'
author:
- first_name: Christian
  full_name: Grimme, Christian
  last_name: Grimme
- first_name: Pascal
  full_name: Kerschke, Pascal
  last_name: Kerschke
- first_name: Pelin
  full_name: Aspar, Pelin
  last_name: Aspar
- first_name: Heike
  full_name: Trautmann, Heike
  id: '100740'
  last_name: Trautmann
  orcid: 0000-0002-9788-8282
- first_name: Mike
  full_name: Preuss, Mike
  last_name: Preuss
- first_name: André H.
  full_name: Deutz, André H.
  last_name: Deutz
- first_name: Hao
  full_name: Wang, Hao
  last_name: Wang
- first_name: Michael
  full_name: Emmerich, Michael
  last_name: Emmerich
citation:
  ama: 'Grimme C, Kerschke P, Aspar P, et al. Peeking beyond peaks: Challenges and
    research potentials of continuous multimodal multi-objective optimization. <i>Computers
    &#38; Operations Research</i>. 2021;136:105489. doi:<a href="https://doi.org/10.1016/j.cor.2021.105489">https://doi.org/10.1016/j.cor.2021.105489</a>'
  apa: 'Grimme, C., Kerschke, P., Aspar, P., Trautmann, H., Preuss, M., Deutz, A.
    H., Wang, H., &#38; Emmerich, M. (2021). Peeking beyond peaks: Challenges and
    research potentials of continuous multimodal multi-objective optimization. <i>Computers
    &#38; Operations Research</i>, <i>136</i>, 105489. <a href="https://doi.org/10.1016/j.cor.2021.105489">https://doi.org/10.1016/j.cor.2021.105489</a>'
  bibtex: '@article{Grimme_Kerschke_Aspar_Trautmann_Preuss_Deutz_Wang_Emmerich_2021,
    title={Peeking beyond peaks: Challenges and research potentials of continuous
    multimodal multi-objective optimization}, volume={136}, DOI={<a href="https://doi.org/10.1016/j.cor.2021.105489">https://doi.org/10.1016/j.cor.2021.105489</a>},
    journal={Computers &#38; Operations Research}, author={Grimme, Christian and Kerschke,
    Pascal and Aspar, Pelin and Trautmann, Heike and Preuss, Mike and Deutz, André
    H. and Wang, Hao and Emmerich, Michael}, year={2021}, pages={105489} }'
  chicago: 'Grimme, Christian, Pascal Kerschke, Pelin Aspar, Heike Trautmann, Mike
    Preuss, André H. Deutz, Hao Wang, and Michael Emmerich. “Peeking beyond Peaks:
    Challenges and Research Potentials of Continuous Multimodal Multi-Objective Optimization.”
    <i>Computers &#38; Operations Research</i> 136 (2021): 105489. <a href="https://doi.org/10.1016/j.cor.2021.105489">https://doi.org/10.1016/j.cor.2021.105489</a>.'
  ieee: 'C. Grimme <i>et al.</i>, “Peeking beyond peaks: Challenges and research potentials
    of continuous multimodal multi-objective optimization,” <i>Computers &#38; Operations
    Research</i>, vol. 136, p. 105489, 2021, doi: <a href="https://doi.org/10.1016/j.cor.2021.105489">https://doi.org/10.1016/j.cor.2021.105489</a>.'
  mla: 'Grimme, Christian, et al. “Peeking beyond Peaks: Challenges and Research Potentials
    of Continuous Multimodal Multi-Objective Optimization.” <i>Computers &#38; Operations
    Research</i>, vol. 136, 2021, p. 105489, doi:<a href="https://doi.org/10.1016/j.cor.2021.105489">https://doi.org/10.1016/j.cor.2021.105489</a>.'
  short: C. Grimme, P. Kerschke, P. Aspar, H. Trautmann, M. Preuss, A.H. Deutz, H.
    Wang, M. Emmerich, Computers &#38; Operations Research 136 (2021) 105489.
date_created: 2023-08-04T07:28:34Z
date_updated: 2023-10-16T12:58:42Z
department:
- _id: '34'
- _id: '819'
doi: 10.1016/j.cor.2021.105489
intvolume: '136'
keyword:
- Multimodal optimization
- Multi-objective continuous optimization
- Landscape analysis
- Visualization
- Benchmarking
- Theory
- Algorithms
language:
- iso: eng
page: '105489'
publication: Computers & Operations Research
publication_identifier:
  issn:
  - 0305-0548
status: public
title: 'Peeking beyond peaks: Challenges and research potentials of continuous multimodal
  multi-objective optimization'
type: journal_article
user_id: '15504'
volume: 136
year: '2021'
...
---
_id: '17204'
abstract:
- lang: eng
  text: 'In a longitudinal naturalistic study, we observed German mothers interacting
    with their infants when they were 3 and 6 months old. Pursuing the idea that infants’
    attention is socialized in everyday interactions, we explored whether eye contact
    is reinforced selectively by behavioral modification in the input provided to
    infants. Applying a microanalytical approach focusing on the sequential organization
    of interaction, we explored how the mother draws the infant’s attention to herself
    and how she tries to maintain attention when the infant is looking at her. Results
    showed that eye contact is reinforced by specific infant-directed practices: interrogatives
    and conversational openings, multimodal stimulation, repetition, and imitation.
    In addition, these practices are contingent on the infant’s own behavior. By comparing
    the two data points (3 and 6 months), we showed how the education of attention
    evolves hand-in-hand with the developing capacities of the infant.'
author:
- first_name: Iris
  full_name: Nomikou, Iris
  last_name: Nomikou
- first_name: Katharina
  full_name: Rohlfing, Katharina
  id: '50352'
  last_name: Rohlfing
- first_name: Joanna
  full_name: Szufnarowska, Joanna
  last_name: Szufnarowska
citation:
  ama: 'Nomikou I, Rohlfing K, Szufnarowska J. Educating attention: recruiting, maintaining,
    and framing eye contact in early natural mother-infant interactions. <i>Interaction
    Studies</i>. 2013;14(2):240-267. doi:<a href="https://doi.org/10.1075/is.14.2.05nom">10.1075/is.14.2.05nom</a>'
  apa: 'Nomikou, I., Rohlfing, K., &#38; Szufnarowska, J. (2013). Educating attention:
    recruiting, maintaining, and framing eye contact in early natural mother-infant
    interactions. <i>Interaction Studies</i>, <i>14</i>(2), 240–267. <a href="https://doi.org/10.1075/is.14.2.05nom">https://doi.org/10.1075/is.14.2.05nom</a>'
  bibtex: '@article{Nomikou_Rohlfing_Szufnarowska_2013, title={Educating attention:
    recruiting, maintaining, and framing eye contact in early natural mother-infant
    interactions}, volume={14}, DOI={<a href="https://doi.org/10.1075/is.14.2.05nom">10.1075/is.14.2.05nom</a>},
    number={2}, journal={Interaction Studies}, publisher={John Benjamins Publishing
    Company}, author={Nomikou, Iris and Rohlfing, Katharina and Szufnarowska, Joanna},
    year={2013}, pages={240–267} }'
  chicago: 'Nomikou, Iris, Katharina Rohlfing, and Joanna Szufnarowska. “Educating
    Attention: Recruiting, Maintaining, and Framing Eye Contact in Early Natural Mother-Infant
    Interactions.” <i>Interaction Studies</i> 14, no. 2 (2013): 240–67. <a href="https://doi.org/10.1075/is.14.2.05nom">https://doi.org/10.1075/is.14.2.05nom</a>.'
  ieee: 'I. Nomikou, K. Rohlfing, and J. Szufnarowska, “Educating attention: recruiting,
    maintaining, and framing eye contact in early natural mother-infant interactions,”
    <i>Interaction Studies</i>, vol. 14, no. 2, pp. 240–267, 2013, doi: <a href="https://doi.org/10.1075/is.14.2.05nom">10.1075/is.14.2.05nom</a>.'
  mla: 'Nomikou, Iris, et al. “Educating Attention: Recruiting, Maintaining, and Framing
    Eye Contact in Early Natural Mother-Infant Interactions.” <i>Interaction Studies</i>,
    vol. 14, no. 2, John Benjamins Publishing Company, 2013, pp. 240–67, doi:<a href="https://doi.org/10.1075/is.14.2.05nom">10.1075/is.14.2.05nom</a>.'
  short: I. Nomikou, K. Rohlfing, J. Szufnarowska, Interaction Studies 14 (2013) 240–267.
date_created: 2020-06-24T13:01:23Z
date_updated: 2023-02-01T16:12:50Z
department:
- _id: '749'
doi: 10.1075/is.14.2.05nom
intvolume: '14'
issue: '2'
keyword:
- interactional adaptation
- multimodal input
- social learning
- ecology of attention
- eye contact
language:
- iso: eng
page: 240-267
publication: Interaction Studies
publication_identifier:
  issn:
  - 1572-0381
publisher: John Benjamins Publishing Company
status: public
title: 'Educating attention: recruiting, maintaining, and framing eye contact in early
  natural mother-infant interactions'
type: journal_article
user_id: '14931'
volume: 14
year: '2013'
...
---
_id: '17225'
abstract:
- lang: eng
  text: How is communicative gesture behavior in robots perceived by humans? Although
    gesture is crucial in social interaction, this research question is still largely
    unexplored in the field of social robotics. Thus, the main objective of the present
    work is to investigate how gestural machine behaviors can be used to design more
    natural communication in social robots. The chosen approach is twofold. Firstly,
    the technical challenges encountered when implementing a speech-gesture generation
    model on a robotic platform are tackled. We present a framework that enables the
    humanoid robot to flexibly produce synthetic speech and co-verbal hand and arm
    gestures at run-time, while not being limited to a predefined repertoire of motor
    actions. Secondly, the achieved flexibility in robot gesture is exploited in controlled
    experiments. To gain a deeper understanding of how communicative robot gesture
    might impact and shape human perception and evaluation of human-robot interaction,
    we conducted a between-subjects experimental study using the humanoid robot in
    a joint task scenario. We manipulated the non-verbal behaviors of the robot in
    three experimental conditions, so that it would refer to objects by utilizing
    either (1) unimodal (i.e., speech only) utterances, (2) congruent multimodal (i.e.,
    semantically matching speech and gesture) or (3) incongruent multimodal (i.e.,
    semantically non-matching speech and gesture) utterances. Our findings reveal
    that the robot is evaluated more positively when non-verbal behaviors such as
    hand and arm gestures are displayed along with speech, even if they do not semantically
    match the spoken utterance.
author:
- first_name: Maha
  full_name: Salem, Maha
  last_name: Salem
- first_name: Stefan
  full_name: Kopp, Stefan
  last_name: Kopp
- first_name: Ipke
  full_name: Wachsmuth, Ipke
  last_name: Wachsmuth
- first_name: Katharina
  full_name: Rohlfing, Katharina
  id: '50352'
  last_name: Rohlfing
- first_name: Frank
  full_name: Joublin, Frank
  last_name: Joublin
citation:
  ama: Salem M, Kopp S, Wachsmuth I, Rohlfing K, Joublin F. Generation and evaluation
    of communicative robot gesture. <i>International Journal of Social Robotics, Special
    Issue on Expectations, Intentions, and Actions</i>. 2012;4(2):201-217. doi:<a
    href="https://doi.org/10.1007/s12369-011-0124-9">10.1007/s12369-011-0124-9</a>
  apa: Salem, M., Kopp, S., Wachsmuth, I., Rohlfing, K., &#38; Joublin, F. (2012).
    Generation and evaluation of communicative robot gesture. <i>International Journal
    of Social Robotics, Special Issue on Expectations, Intentions, and Actions</i>,
    <i>4</i>(2), 201–217. <a href="https://doi.org/10.1007/s12369-011-0124-9">https://doi.org/10.1007/s12369-011-0124-9</a>
  bibtex: '@article{Salem_Kopp_Wachsmuth_Rohlfing_Joublin_2012, title={Generation
    and evaluation of communicative robot gesture}, volume={4}, DOI={<a href="https://doi.org/10.1007/s12369-011-0124-9">10.1007/s12369-011-0124-9</a>},
    number={2}, journal={International Journal of Social Robotics, Special Issue on
    Expectations, Intentions, and Actions}, publisher={Springer Science + Business
    Media}, author={Salem, Maha and Kopp, Stefan and Wachsmuth, Ipke and Rohlfing,
    Katharina and Joublin, Frank}, year={2012}, pages={201–217} }'
  chicago: 'Salem, Maha, Stefan Kopp, Ipke Wachsmuth, Katharina Rohlfing, and Frank
    Joublin. “Generation and Evaluation of Communicative Robot Gesture.” <i>International
    Journal of Social Robotics, Special Issue on Expectations, Intentions, and Actions</i>
    4, no. 2 (2012): 201–17. <a href="https://doi.org/10.1007/s12369-011-0124-9">https://doi.org/10.1007/s12369-011-0124-9</a>.'
  ieee: 'M. Salem, S. Kopp, I. Wachsmuth, K. Rohlfing, and F. Joublin, “Generation
    and evaluation of communicative robot gesture,” <i>International Journal of Social
    Robotics, Special Issue on Expectations, Intentions, and Actions</i>, vol. 4,
    no. 2, pp. 201–217, 2012, doi: <a href="https://doi.org/10.1007/s12369-011-0124-9">10.1007/s12369-011-0124-9</a>.'
  mla: Salem, Maha, et al. “Generation and Evaluation of Communicative Robot Gesture.”
    <i>International Journal of Social Robotics, Special Issue on Expectations, Intentions,
    and Actions</i>, vol. 4, no. 2, Springer Science + Business Media, 2012, pp. 201–17,
    doi:<a href="https://doi.org/10.1007/s12369-011-0124-9">10.1007/s12369-011-0124-9</a>.
  short: M. Salem, S. Kopp, I. Wachsmuth, K. Rohlfing, F. Joublin, International Journal
    of Social Robotics, Special Issue on Expectations, Intentions, and Actions 4 (2012)
    201–217.
date_created: 2020-06-24T13:01:48Z
date_updated: 2023-02-01T16:21:50Z
department:
- _id: '749'
doi: 10.1007/s12369-011-0124-9
intvolume: '4'
issue: '2'
keyword:
- Social Human-Robot Interaction
- Multimodal Interaction and Conversational Skills
- Robot Companions and Social Robots
- Non-verbal Cues and Expressiveness
language:
- iso: eng
page: 201-217
publication: International Journal of Social Robotics, Special Issue on Expectations,
  Intentions, and Actions
publication_identifier:
  issn:
  - 1875-4805
publisher: Springer Science + Business Media
status: public
title: Generation and evaluation of communicative robot gesture
type: journal_article
user_id: '14931'
volume: 4
year: '2012'
...
---
_id: '17428'
abstract:
- lang: eng
  text: How is communicative gesture behavior in robots perceived by humans? Although
    gesture is crucial in social interaction, this research question is still largely
    unexplored in the field of social robotics. Thus, the main objective of the present
    work is to investigate how gestural machine behaviors can be used to design more
    natural communication in social robots. The chosen approach is twofold. Firstly,
    the technical challenges encountered when implementing a speech-gesture generation
    model on a robotic platform are tackled. We present a framework that enables the
    humanoid robot to flexibly produce synthetic speech and co-verbal hand and arm
    gestures at run-time, while not being limited to a predefined repertoire of motor
    actions. Secondly, the achieved flexibility in robot gesture is exploited in controlled
    experiments. To gain a deeper understanding of how communicative robot gesture
    might impact and shape human perception and evaluation of human-robot interaction,
    we conducted a between-subjects experimental study using the humanoid robot in
    a joint task scenario. We manipulated the non-verbal behaviors of the robot in
    three experimental conditions, so that it would refer to objects by utilizing
    either (1) unimodal (i.e., speech only) utterances, (2) congruent multimodal (i.e.,
    semantically matching speech and gesture) or (3) incongruent multimodal (i.e.,
    semantically non-matching speech and gesture) utterances. Our findings reveal
    that the robot is evaluated more positively when non-verbal behaviors such as
    hand and arm gestures are displayed along with speech, even if they do not semantically
    match the spoken utterance.
author:
- first_name: Maha
  full_name: Salem, Maha
  last_name: Salem
- first_name: Stefan
  full_name: Kopp, Stefan
  last_name: Kopp
- first_name: Ipke
  full_name: Wachsmuth, Ipke
  last_name: Wachsmuth
- first_name: Katharina
  full_name: Rohlfing, Katharina
  id: '50352'
  last_name: Rohlfing
- first_name: Frank
  full_name: Joublin, Frank
  last_name: Joublin
citation:
  ama: Salem M, Kopp S, Wachsmuth I, Rohlfing K, Joublin F. Generation and evaluation
    of communicative robot gesture. <i>International Journal of Social Robotics, Special
    Issue on Expectations, Intentions, and Actions</i>. 2012;4(2):201-217. doi:<a
    href="https://doi.org/10.1007/s12369-011-0124-9">10.1007/s12369-011-0124-9</a>
  apa: Salem, M., Kopp, S., Wachsmuth, I., Rohlfing, K., &#38; Joublin, F. (2012).
    Generation and evaluation of communicative robot gesture. <i>International Journal
    of Social Robotics, Special Issue on Expectations, Intentions, and Actions</i>,
    <i>4</i>(2), 201–217. <a href="https://doi.org/10.1007/s12369-011-0124-9">https://doi.org/10.1007/s12369-011-0124-9</a>
  bibtex: '@article{Salem_Kopp_Wachsmuth_Rohlfing_Joublin_2012, title={Generation
    and evaluation of communicative robot gesture}, volume={4}, DOI={<a href="https://doi.org/10.1007/s12369-011-0124-9">10.1007/s12369-011-0124-9</a>},
    number={2}, journal={International Journal of Social Robotics, Special Issue on
    Expectations, Intentions, and Actions}, publisher={Springer Science + Business
    Media}, author={Salem, Maha and Kopp, Stefan and Wachsmuth, Ipke and Rohlfing,
    Katharina and Joublin, Frank}, year={2012}, pages={201–217} }'
  chicago: 'Salem, Maha, Stefan Kopp, Ipke Wachsmuth, Katharina Rohlfing, and Frank
    Joublin. “Generation and Evaluation of Communicative Robot Gesture.” <i>International
    Journal of Social Robotics, Special Issue on Expectations, Intentions, and Actions</i>
    4, no. 2 (2012): 201–17. <a href="https://doi.org/10.1007/s12369-011-0124-9">https://doi.org/10.1007/s12369-011-0124-9</a>.'
  ieee: 'M. Salem, S. Kopp, I. Wachsmuth, K. Rohlfing, and F. Joublin, “Generation
    and evaluation of communicative robot gesture,” <i>International Journal of Social
    Robotics, Special Issue on Expectations, Intentions, and Actions</i>, vol. 4,
    no. 2, pp. 201–217, 2012, doi: <a href="https://doi.org/10.1007/s12369-011-0124-9">10.1007/s12369-011-0124-9</a>.'
  mla: Salem, Maha, et al. “Generation and Evaluation of Communicative Robot Gesture.”
    <i>International Journal of Social Robotics, Special Issue on Expectations, Intentions,
    and Actions</i>, vol. 4, no. 2, Springer Science + Business Media, 2012, pp. 201–17,
    doi:<a href="https://doi.org/10.1007/s12369-011-0124-9">10.1007/s12369-011-0124-9</a>.
  short: M. Salem, S. Kopp, I. Wachsmuth, K. Rohlfing, F. Joublin, International Journal
    of Social Robotics, Special Issue on Expectations, Intentions, and Actions 4 (2012)
    201–217.
date_created: 2020-07-28T11:44:02Z
date_updated: 2023-02-01T12:52:23Z
department:
- _id: '749'
doi: 10.1007/s12369-011-0124-9
intvolume: '         4'
issue: '2'
keyword:
- Social Human-Robot Interaction
- Multimodal Interaction and Conversational Skills
- Robot Companions and Social Robots
- Non-verbal Cues and Expressiveness
language:
- iso: eng
page: 201-217
publication: International Journal of Social Robotics, Special Issue on Expectations,
  Intentions, and Actions
publication_identifier:
  issn:
  - 1875-4805
publisher: Springer Science + Business Media
status: public
title: Generation and evaluation of communicative robot gesture
type: journal_article
user_id: '14931'
volume: 4
year: '2012'
...
---
_id: '17246'
author:
- first_name: Iris
  full_name: Nomikou, Iris
  last_name: Nomikou
- first_name: Katharina
  full_name: Rohlfing, Katharina
  id: '50352'
  last_name: Rohlfing
citation:
  ama: 'Nomikou I, Rohlfing K. Language Does Something: Body Action and Language in
    Maternal Input to Three-Month-Olds. <i>IEEE Transactions on Autonomous Mental
    Development</i>. 2011;3(2):113-128. doi:<a href="https://doi.org/10.1109/TAMD.2011.2140113">10.1109/TAMD.2011.2140113</a>'
  apa: 'Nomikou, I., &#38; Rohlfing, K. (2011). Language Does Something: Body Action
    and Language in Maternal Input to Three-Month-Olds. <i>IEEE Transactions on Autonomous
    Mental Development</i>, <i>3</i>(2), 113–128. <a href="https://doi.org/10.1109/TAMD.2011.2140113">https://doi.org/10.1109/TAMD.2011.2140113</a>'
  bibtex: '@article{Nomikou_Rohlfing_2011, title={Language Does Something: Body Action
    and Language in Maternal Input to Three-Month-Olds}, volume={3}, DOI={<a href="https://doi.org/10.1109/TAMD.2011.2140113">10.1109/TAMD.2011.2140113</a>},
    number={2}, journal={IEEE Transactions on Autonomous Mental Development}, publisher={Institute
    of Electrical &#38; Electronics Engineers (IEEE)}, author={Nomikou, Iris and Rohlfing,
    Katharina}, year={2011}, pages={113–128} }'
  chicago: 'Nomikou, Iris, and Katharina Rohlfing. “Language Does Something: Body
    Action and Language in Maternal Input to Three-Month-Olds.” <i>IEEE Transactions
    on Autonomous Mental Development</i> 3, no. 2 (2011): 113–28. <a href="https://doi.org/10.1109/TAMD.2011.2140113">https://doi.org/10.1109/TAMD.2011.2140113</a>.'
  ieee: 'I. Nomikou and K. Rohlfing, “Language Does Something: Body Action and Language
    in Maternal Input to Three-Month-Olds,” <i>IEEE Transactions on Autonomous Mental
    Development</i>, vol. 3, no. 2, pp. 113–128, 2011, doi: <a href="https://doi.org/10.1109/TAMD.2011.2140113">10.1109/TAMD.2011.2140113</a>.'
  mla: 'Nomikou, Iris, and Katharina Rohlfing. “Language Does Something: Body Action
    and Language in Maternal Input to Three-Month-Olds.” <i>IEEE Transactions on Autonomous
    Mental Development</i>, vol. 3, no. 2, Institute of Electrical &#38; Electronics
    Engineers (IEEE), 2011, pp. 113–28, doi:<a href="https://doi.org/10.1109/TAMD.2011.2140113">10.1109/TAMD.2011.2140113</a>.'
  short: I. Nomikou, K. Rohlfing, IEEE Transactions on Autonomous Mental Development
    3 (2011) 113–128.
date_created: 2020-06-24T13:02:12Z
date_updated: 2023-02-01T12:54:33Z
department:
- _id: '749'
doi: 10.1109/TAMD.2011.2140113
intvolume: '         3'
issue: '2'
keyword:
- acoustic packaging
- mother-child interaction
- social learning
- multimodal grounding in input
- ecology of interactions
- synchrony
language:
- iso: eng
page: 113-128
publication: IEEE Transactions on Autonomous Mental Development
publication_identifier:
  issn:
  - 1943-0612
publisher: Institute of Electrical & Electronics Engineers (IEEE)
status: public
title: 'Language Does Something: Body Action and Language in Maternal Input to Three-Month-Olds'
type: journal_article
user_id: '14931'
volume: 3
year: '2011'
...
---
_id: '17430'
abstract:
- lang: eng
  text: 'Previous work has shown that gestural behaviors affect anthropomorphic inferences
    about artificial communicators such as virtual agents. In an experiment with a
    humanoid robot, we investigated to what extent gesture would affect anthropomorphic
    inferences about the robot. Particularly, we examined the effects of the robot''s
    hand and arm gestures on the attribution of typically human traits, likability
    of the robot, shared reality, and future contact intentions after interacting
    with the robot. For this, we manipulated the non-verbal behaviors of the humanoid
    robot in three experimental conditions: (1) no gesture, (2) congruent gesture,
    and (3) incongruent gesture. We hypothesized higher ratings on all dependent measures
    in the two gesture (vs. no gesture) conditions. The results confirm our predictions:
    when the robot used gestures during interaction, it was anthropomorphized more,
    participants perceived it as more likable, reported greater shared reality with
    it, and showed increased future contact intentions than when the robot gave instructions
    without using gestures. Surprisingly, this effect was particularly pronounced
    when the robot''s gestures were partly incongruent with speech. These findings
    show that communicative non-verbal behaviors in robotic systems affect both anthropomorphic
    perceptions and the mental models humans form of a humanoid robot during interaction.'
author:
- first_name: Maha
  full_name: Salem, Maha
  last_name: Salem
- first_name: Friederike Anne
  full_name: Eyssel, Friederike Anne
  last_name: Eyssel
- first_name: Katharina
  full_name: Rohlfing, Katharina
  id: '50352'
  last_name: Rohlfing
- first_name: Stefan
  full_name: Kopp, Stefan
  last_name: Kopp
- first_name: F.
  full_name: Joublin, F.
  last_name: Joublin
citation:
  ama: 'Salem M, Eyssel FA, Rohlfing K, Kopp S, Joublin F. Effects of gesture on the
    perception of psychological anthropomorphism: A case study with a humanoid robot.
    In: Mutlu B, Bartneck C, Ham J, Evers V, Kanda T, eds. <i>Social Robotics</i>.
    Vol 7072. Springer Science + Business Media; 2011:31-41. doi:<a href="https://doi.org/10.1007/978-3-642-25504-5_4">10.1007/978-3-642-25504-5_4</a>'
  apa: 'Salem, M., Eyssel, F. A., Rohlfing, K., Kopp, S., &#38; Joublin, F. (2011).
    Effects of gesture on the perception of psychological anthropomorphism: A case
    study with a humanoid robot. In B. Mutlu, C. Bartneck, J. Ham, V. Evers, &#38;
    T. Kanda (Eds.), <i>Social Robotics</i> (Vol. 7072, pp. 31–41). Springer Science
    + Business Media. <a href="https://doi.org/10.1007/978-3-642-25504-5_4">https://doi.org/10.1007/978-3-642-25504-5_4</a>'
  bibtex: '@inproceedings{Salem_Eyssel_Rohlfing_Kopp_Joublin_2011, title={Effects
    of gesture on the perception of psychological anthropomorphism: A case study with
    a humanoid robot}, volume={7072}, DOI={<a href="https://doi.org/10.1007/978-3-642-25504-5_4">10.1007/978-3-642-25504-5_4</a>},
    booktitle={Social Robotics}, publisher={Springer Science + Business Media}, author={Salem,
    Maha and Eyssel, Friederike Anne and Rohlfing, Katharina and Kopp, Stefan and
    Joublin, F.}, editor={Mutlu, B. and Bartneck, C. and Ham, J. and Evers, V. and
    Kanda, T.}, year={2011}, pages={31–41} }'
  chicago: 'Salem, Maha, Friederike Anne Eyssel, Katharina Rohlfing, Stefan Kopp,
    and F. Joublin. “Effects of Gesture on the Perception of Psychological Anthropomorphism:
    A Case Study with a Humanoid Robot.” In <i>Social Robotics</i>, edited by B. Mutlu,
    C. Bartneck, J. Ham, V. Evers, and T. Kanda, 7072:31–41. Springer Science + Business
    Media, 2011. <a href="https://doi.org/10.1007/978-3-642-25504-5_4">https://doi.org/10.1007/978-3-642-25504-5_4</a>.'
  ieee: 'M. Salem, F. A. Eyssel, K. Rohlfing, S. Kopp, and F. Joublin, “Effects of
    gesture on the perception of psychological anthropomorphism: A case study with
    a humanoid robot,” in <i>Social Robotics</i>, 2011, vol. 7072, pp. 31–41, doi:
    <a href="https://doi.org/10.1007/978-3-642-25504-5_4">10.1007/978-3-642-25504-5_4</a>.'
  mla: 'Salem, Maha, et al. “Effects of Gesture on the Perception of Psychological
    Anthropomorphism: A Case Study with a Humanoid Robot.” <i>Social Robotics</i>,
    edited by B. Mutlu et al., vol. 7072, Springer Science + Business Media, 2011,
    pp. 31–41, doi:<a href="https://doi.org/10.1007/978-3-642-25504-5_4">10.1007/978-3-642-25504-5_4</a>.'
  short: 'M. Salem, F.A. Eyssel, K. Rohlfing, S. Kopp, F. Joublin, in: B. Mutlu, C.
    Bartneck, J. Ham, V. Evers, T. Kanda (Eds.), Social Robotics, Springer Science
    + Business Media, 2011, pp. 31–41.'
date_created: 2020-07-28T11:44:04Z
date_updated: 2023-02-01T12:52:02Z
department:
- _id: '749'
doi: 10.1007/978-3-642-25504-5_4
editor:
- first_name: B.
  full_name: Mutlu, B.
  last_name: Mutlu
- first_name: C.
  full_name: Bartneck, C.
  last_name: Bartneck
- first_name: J.
  full_name: Ham, J.
  last_name: Ham
- first_name: V.
  full_name: Evers, V.
  last_name: Evers
- first_name: T.
  full_name: Kanda, T.
  last_name: Kanda
intvolume: '      7072'
keyword:
- Multimodal Interaction and Conversational Skills
- Anthropomorphism
- Non-verbal Cues and Expressiveness
language:
- iso: eng
page: 31-41
publication: Social Robotics
publication_identifier:
  isbn:
  - 978-3-642-25503-8
publisher: Springer Science + Business Media
status: public
title: 'Effects of gesture on the perception of psychological anthropomorphism: A
  case study with a humanoid robot'
type: conference
user_id: '14931'
volume: 7072
year: '2011'
...
---
_id: '17244'
abstract:
- lang: eng
  text: 'Robots interacting with humans need to understand actions and make use of
    language in social interactions. Research on infant development has shown that
    language helps the learner to structure visual observations of action. This acoustic
    information typically in the form of narration overlaps with action sequences
    and provides infants with a bottom-up guide to find structure within them. This
    concept has been introduced as acoustic packaging by Hirsh-Pasek and Golinkoff.
    We developed and integrated a prominence detection module in our acoustic packaging
    system to detect semantically relevant information linguistically highlighted
    by the tutor. Evaluation results on speech data from adult-infant interactions
    show a significant agreement with human raters. Furthermore a first approach based
    on acoustic packages which uses the prominence detection results to generate acoustic
    feedback is presented. Index Terms: prominence, multimodal action segmentation,
    human robot interaction, feedback'
author:
- first_name: Lars
  full_name: Schillingmann, Lars
  last_name: Schillingmann
- first_name: Petra
  full_name: Wagner, Petra
  last_name: Wagner
- first_name: Christian
  full_name: Munier, Christian
  last_name: Munier
- first_name: Britta
  full_name: Wrede, Britta
  last_name: Wrede
- first_name: Katharina
  full_name: Rohlfing, Katharina
  id: '50352'
  last_name: Rohlfing
citation:
  ama: 'Schillingmann L, Wagner P, Munier C, Wrede B, Rohlfing K. Using Prominence
    Detection to Generate Acoustic Feedback in Tutoring Scenarios. In: <i>Interspeech
    2011 (12th Annual Conference of the International Speech Communication Association)</i>.
    ; 2011:3105-3108.'
  apa: Schillingmann, L., Wagner, P., Munier, C., Wrede, B., &#38; Rohlfing, K. (2011).
    Using Prominence Detection to Generate Acoustic Feedback in Tutoring Scenarios.
    <i>Interspeech 2011 (12th Annual Conference of the International Speech Communication
    Association)</i>, 3105–3108.
  bibtex: '@inproceedings{Schillingmann_Wagner_Munier_Wrede_Rohlfing_2011, title={Using
    Prominence Detection to Generate Acoustic Feedback in Tutoring Scenarios}, booktitle={Interspeech
    2011 (12th Annual Conference of the International Speech Communication Association)},
    author={Schillingmann, Lars and Wagner, Petra and Munier, Christian and Wrede,
    Britta and Rohlfing, Katharina}, year={2011}, pages={3105–3108} }'
  chicago: Schillingmann, Lars, Petra Wagner, Christian Munier, Britta Wrede, and
    Katharina Rohlfing. “Using Prominence Detection to Generate Acoustic Feedback
    in Tutoring Scenarios.” In <i>Interspeech 2011 (12th Annual Conference of the
    International Speech Communication Association)</i>, 3105–8, 2011.
  ieee: L. Schillingmann, P. Wagner, C. Munier, B. Wrede, and K. Rohlfing, “Using
    Prominence Detection to Generate Acoustic Feedback in Tutoring Scenarios,” in
    <i>Interspeech 2011 (12th Annual Conference of the International Speech Communication
    Association)</i>, 2011, pp. 3105–3108.
  mla: Schillingmann, Lars, et al. “Using Prominence Detection to Generate Acoustic
    Feedback in Tutoring Scenarios.” <i>Interspeech 2011 (12th Annual Conference of
    the International Speech Communication Association)</i>, 2011, pp. 3105–08.
  short: 'L. Schillingmann, P. Wagner, C. Munier, B. Wrede, K. Rohlfing, in: Interspeech
    2011 (12th Annual Conference of the International Speech Communication Association),
    2011, pp. 3105–3108.'
date_created: 2020-06-24T13:02:10Z
date_updated: 2023-02-01T12:53:54Z
department:
- _id: '749'
keyword:
- Feedback
- Human Robot Interaction
- Prominence
- Multimodal Action Segmentation
language:
- iso: eng
page: 3105-3108
publication: Interspeech 2011 (12th Annual Conference of the International Speech
  Communication Association)
status: public
title: Using Prominence Detection to Generate Acoustic Feedback in Tutoring Scenarios
type: conference
user_id: '14931'
year: '2011'
...
---
_id: '17245'
author:
- first_name: Lars
  full_name: Schillingmann, Lars
  last_name: Schillingmann
- first_name: Petra
  full_name: Wagner, Petra
  last_name: Wagner
- first_name: Christian
  full_name: Munier, Christian
  last_name: Munier
- first_name: Britta
  full_name: Wrede, Britta
  last_name: Wrede
- first_name: Katharina
  full_name: Rohlfing, Katharina
  id: '50352'
  last_name: Rohlfing
citation:
  ama: 'Schillingmann L, Wagner P, Munier C, Wrede B, Rohlfing K. Acoustic Packaging
    and the Learning of Words. In: ; 2011. doi:<a href="https://doi.org/10.3389/conf.fncom.2011.52.00020">10.3389/conf.fncom.2011.52.00020</a>'
  apa: Schillingmann, L., Wagner, P., Munier, C., Wrede, B., &#38; Rohlfing, K. (2011).
    <i>Acoustic Packaging and the Learning of Words</i>. <a href="https://doi.org/10.3389/conf.fncom.2011.52.00020">https://doi.org/10.3389/conf.fncom.2011.52.00020</a>
  bibtex: '@inproceedings{Schillingmann_Wagner_Munier_Wrede_Rohlfing_2011, title={Acoustic
    Packaging and the Learning of Words}, DOI={<a href="https://doi.org/10.3389/conf.fncom.2011.52.00020">10.3389/conf.fncom.2011.52.00020</a>},
    author={Schillingmann, Lars and Wagner, Petra and Munier, Christian and Wrede,
    Britta and Rohlfing, Katharina}, year={2011} }'
  chicago: Schillingmann, Lars, Petra Wagner, Christian Munier, Britta Wrede, and
    Katharina Rohlfing. “Acoustic Packaging and the Learning of Words,” 2011. <a href="https://doi.org/10.3389/conf.fncom.2011.52.00020">https://doi.org/10.3389/conf.fncom.2011.52.00020</a>.
  ieee: 'L. Schillingmann, P. Wagner, C. Munier, B. Wrede, and K. Rohlfing, “Acoustic
    Packaging and the Learning of Words,” 2011, doi: <a href="https://doi.org/10.3389/conf.fncom.2011.52.00020">10.3389/conf.fncom.2011.52.00020</a>.'
  mla: Schillingmann, Lars, et al. <i>Acoustic Packaging and the Learning of Words</i>.
    2011, doi:<a href="https://doi.org/10.3389/conf.fncom.2011.52.00020">10.3389/conf.fncom.2011.52.00020</a>.
  short: 'L. Schillingmann, P. Wagner, C. Munier, B. Wrede, K. Rohlfing, in: 2011.'
date_created: 2020-06-24T13:02:11Z
date_updated: 2023-02-01T12:54:16Z
department:
- _id: '749'
doi: 10.3389/conf.fncom.2011.52.00020
keyword:
- Prominence
- Multimodal Action Segmentation
- Feedback
- Color Saliency
- Human Robot Interaction
language:
- iso: eng
publication_identifier:
  issn:
  - 1662-5188
status: public
title: Acoustic Packaging and the Learning of Words
type: conference
user_id: '14931'
year: '2011'
...
---
_id: '17242'
abstract:
- lang: eng
  text: 'Previous work has shown that gestural behaviors affect anthropomorphic inferences
    about artificial communicators such as virtual agents. In an experiment with a
    humanoid robot, we investigated to what extent gesture would affect anthropomorphic
    inferences about the robot. Particularly, we examined the effects of the robot''s
    hand and arm gestures on the attribution of typically human traits, likability
    of the robot, shared reality, and future contact intentions after interacting
    with the robot. For this, we manipulated the non-verbal behaviors of the humanoid
    robot in three experimental conditions: (1) no gesture, (2) congruent gesture,
    and (3) incongruent gesture. We hypothesized higher ratings on all dependent measures
    in the two gesture (vs. no gesture) conditions. The results confirm our predictions:
    when the robot used gestures during interaction, it was anthropomorphized more,
    participants perceived it as more likable, reported greater shared reality with
    it, and showed increased future contact intentions than when the robot gave instructions
    without using gestures. Surprisingly, this effect was particularly pronounced
    when the robot''s gestures were partly incongruent with speech. These findings
    show that communicative non-verbal behaviors in robotic systems affect both anthropomorphic
    perceptions and the mental models humans form of a humanoid robot during interaction.'
author:
- first_name: Maha
  full_name: Salem, Maha
  last_name: Salem
- first_name: Friederike Anne
  full_name: Eyssel, Friederike Anne
  last_name: Eyssel
- first_name: Katharina
  full_name: Rohlfing, Katharina
  id: '50352'
  last_name: Rohlfing
- first_name: Stefan
  full_name: Kopp, Stefan
  last_name: Kopp
- first_name: F.
  full_name: Joublin, F.
  last_name: Joublin
citation:
  ama: 'Salem M, Eyssel FA, Rohlfing K, Kopp S, Joublin F. Effects of gesture on the
    perception of psychological anthropomorphism: A case study with a humanoid robot.
    In: Mutlu B, Bartneck C, Ham J, Evers V, Kanda T, eds. <i>Social Robotics</i>.
    Vol 7072. Springer Science + Business Media; 2011:31-41. doi:<a href="https://doi.org/10.1007/978-3-642-25504-5_4">10.1007/978-3-642-25504-5_4</a>'
  apa: 'Salem, M., Eyssel, F. A., Rohlfing, K., Kopp, S., &#38; Joublin, F. (2011).
    Effects of gesture on the perception of psychological anthropomorphism: A case
    study with a humanoid robot. In B. Mutlu, C. Bartneck, J. Ham, V. Evers, &#38;
    T. Kanda (Eds.), <i>Social Robotics</i> (Vol. 7072, pp. 31–41). Springer Science
    + Business Media. <a href="https://doi.org/10.1007/978-3-642-25504-5_4">https://doi.org/10.1007/978-3-642-25504-5_4</a>'
  bibtex: '@inproceedings{Salem_Eyssel_Rohlfing_Kopp_Joublin_2011, title={Effects
    of gesture on the perception of psychological anthropomorphism: A case study with
    a humanoid robot}, volume={7072}, DOI={<a href="https://doi.org/10.1007/978-3-642-25504-5_4">10.1007/978-3-642-25504-5_4</a>},
    booktitle={Social Robotics}, publisher={Springer Science + Business Media}, author={Salem,
    Maha and Eyssel, Friederike Anne and Rohlfing, Katharina and Kopp, Stefan and
    Joublin, F.}, editor={Mutlu, B. and Bartneck, C. and Ham, J. and Evers, V. and
    Kanda, T.}, year={2011}, pages={31–41} }'
  chicago: 'Salem, Maha, Friederike Anne Eyssel, Katharina Rohlfing, Stefan Kopp,
    and F. Joublin. “Effects of Gesture on the Perception of Psychological Anthropomorphism:
    A Case Study with a Humanoid Robot.” In <i>Social Robotics</i>, edited by B. Mutlu,
    C. Bartneck, J. Ham, V. Evers, and T. Kanda, 7072:31–41. Springer Science + Business
    Media, 2011. <a href="https://doi.org/10.1007/978-3-642-25504-5_4">https://doi.org/10.1007/978-3-642-25504-5_4</a>.'
  ieee: 'M. Salem, F. A. Eyssel, K. Rohlfing, S. Kopp, and F. Joublin, “Effects of
    gesture on the perception of psychological anthropomorphism: A case study with
    a humanoid robot,” in <i>Social Robotics</i>, 2011, vol. 7072, pp. 31–41, doi:
    <a href="https://doi.org/10.1007/978-3-642-25504-5_4">10.1007/978-3-642-25504-5_4</a>.'
  mla: 'Salem, Maha, et al. “Effects of Gesture on the Perception of Psychological
    Anthropomorphism: A Case Study with a Humanoid Robot.” <i>Social Robotics</i>,
    edited by B. Mutlu et al., vol. 7072, Springer Science + Business Media, 2011,
    pp. 31–41, doi:<a href="https://doi.org/10.1007/978-3-642-25504-5_4">10.1007/978-3-642-25504-5_4</a>.'
  short: 'M. Salem, F.A. Eyssel, K. Rohlfing, S. Kopp, F. Joublin, in: B. Mutlu, C.
    Bartneck, J. Ham, V. Evers, T. Kanda (Eds.), Social Robotics, Springer Science
    + Business Media, 2011, pp. 31–41.'
date_created: 2020-06-24T13:02:07Z
date_updated: 2023-02-01T12:58:57Z
department:
- _id: '749'
doi: 10.1007/978-3-642-25504-5_4
editor:
- first_name: B.
  full_name: Mutlu, B.
  last_name: Mutlu
- first_name: C.
  full_name: Bartneck, C.
  last_name: Bartneck
- first_name: J.
  full_name: Ham, J.
  last_name: Ham
- first_name: V.
  full_name: Evers, V.
  last_name: Evers
- first_name: T.
  full_name: Kanda, T.
  last_name: Kanda
intvolume: '      7072'
keyword:
- Multimodal Interaction and Conversational Skills
- Anthropomorphism
- Non-verbal Cues and Expressiveness
language:
- iso: eng
page: 31-41
publication: Social Robotics
publication_identifier:
  isbn:
  - 978-3-642-25503-8
publisher: Springer Science + Business Media
status: public
title: 'Effects of gesture on the perception of psychological anthropomorphism: A
  case study with a humanoid robot'
type: conference
user_id: '14931'
volume: 7072
year: '2011'
...
---
_id: '11892'
abstract:
- lang: eng
  text: For an environment to be perceived as being smart, contextual information
    has to be gathered to adapt the system's behavior and its interface towards the
    user. Being a rich source of context information, speech can be acquired unobtrusively
    by microphone arrays and then processed to extract information about the user
    and his environment. In this paper, a system for joint temporal segmentation,
    speaker localization, and identification is presented, which is supported by face
    identification from video data obtained from a steerable camera. Special attention
    is paid to latency aspects and online processing capabilities, as they are important
    for the application under investigation, namely ambient communication. It describes
    the vision of terminal-less, session-less and multi-modal telecommunication with
    remote partners, where the user can move freely within his home while the communication
    follows him. The speaker diarization serves as a context source, which has been
    integrated in a service-oriented middleware architecture and provided to the application
    to select the most appropriate I/O device and to steer the camera towards the
    speaker during ambient communication.
author:
- first_name: Joerg
  full_name: Schmalenstroeer, Joerg
  id: '460'
  last_name: Schmalenstroeer
- first_name: Reinhold
  full_name: Haeb-Umbach, Reinhold
  id: '242'
  last_name: Haeb-Umbach
citation:
  ama: Schmalenstroeer J, Haeb-Umbach R. Online Diarization of Streaming Audio-Visual
    Data for Smart Environments. <i>IEEE Journal of Selected Topics in Signal Processing</i>.
    2010;4(5):845-856. doi:<a href="https://doi.org/10.1109/JSTSP.2010.2050519">10.1109/JSTSP.2010.2050519</a>
  apa: Schmalenstroeer, J., &#38; Haeb-Umbach, R. (2010). Online Diarization of Streaming
    Audio-Visual Data for Smart Environments. <i>IEEE Journal of Selected Topics in
    Signal Processing</i>, <i>4</i>(5), 845–856. <a href="https://doi.org/10.1109/JSTSP.2010.2050519">https://doi.org/10.1109/JSTSP.2010.2050519</a>
  bibtex: '@article{Schmalenstroeer_Haeb-Umbach_2010, title={Online Diarization of
    Streaming Audio-Visual Data for Smart Environments}, volume={4}, DOI={<a href="https://doi.org/10.1109/JSTSP.2010.2050519">10.1109/JSTSP.2010.2050519</a>},
    number={5}, journal={IEEE Journal of Selected Topics in Signal Processing}, author={Schmalenstroeer,
    Joerg and Haeb-Umbach, Reinhold}, year={2010}, pages={845–856} }'
  chicago: 'Schmalenstroeer, Joerg, and Reinhold Haeb-Umbach. “Online Diarization
    of Streaming Audio-Visual Data for Smart Environments.” <i>IEEE Journal of Selected
    Topics in Signal Processing</i> 4, no. 5 (2010): 845–56. <a href="https://doi.org/10.1109/JSTSP.2010.2050519">https://doi.org/10.1109/JSTSP.2010.2050519</a>.'
  ieee: 'J. Schmalenstroeer and R. Haeb-Umbach, “Online Diarization of Streaming Audio-Visual
    Data for Smart Environments,” <i>IEEE Journal of Selected Topics in Signal Processing</i>,
    vol. 4, no. 5, pp. 845–856, 2010, doi: <a href="https://doi.org/10.1109/JSTSP.2010.2050519">10.1109/JSTSP.2010.2050519</a>.'
  mla: Schmalenstroeer, Joerg, and Reinhold Haeb-Umbach. “Online Diarization of Streaming
    Audio-Visual Data for Smart Environments.” <i>IEEE Journal of Selected Topics
    in Signal Processing</i>, vol. 4, no. 5, 2010, pp. 845–56, doi:<a href="https://doi.org/10.1109/JSTSP.2010.2050519">10.1109/JSTSP.2010.2050519</a>.
  short: J. Schmalenstroeer, R. Haeb-Umbach, IEEE Journal of Selected Topics in Signal
    Processing 4 (2010) 845–856.
date_created: 2019-07-12T05:30:16Z
date_updated: 2023-10-26T08:10:18Z
department:
- _id: '54'
doi: 10.1109/JSTSP.2010.2050519
intvolume: '         4'
issue: '5'
keyword:
- audio streaming
- audio visual data streaming
- context information speech
- face identification
- face recognition
- image segmentation
- middleware
- multimodal telecommunication
- online diarization
- service oriented middleware architecture
- sessionless telecommunication
- software architecture
- speaker identification
- speaker localization
- speaker recognition
- steerable camera
- telecommunication computing
- temporal segmentation
- terminal-less telecommunication
- video streaming
language:
- iso: eng
main_file_link:
- open_access: '1'
  url: https://groups.uni-paderborn.de/nt/pubs/2010/ScHa10.pdf
oa: '1'
page: 845-856
publication: IEEE Journal of Selected Topics in Signal Processing
quality_controlled: '1'
status: public
title: Online Diarization of Streaming Audio-Visual Data for Smart Environments
type: journal_article
user_id: '460'
volume: 4
year: '2010'
...
---
_id: '17256'
author:
- first_name: Angela
  full_name: Grimminger, Angela
  id: '57578'
  last_name: Grimminger
- first_name: Katharina
  full_name: Rohlfing, Katharina
  id: '50352'
  last_name: Rohlfing
- first_name: Prisca
  full_name: Stenneken, Prisca
  last_name: Stenneken
citation:
  ama: Grimminger A, Rohlfing K, Stenneken P. Children’s lexical skills and task demands
    affect gestural behavior in mothers of late-talking children and children with
    typical language development. <i>Gesture</i>. 2010;10(2):251-278. doi:<a href="https://doi.org/10.1075/gest.10.2-3.07gri">10.1075/gest.10.2-3.07gri</a>
  apa: Grimminger, A., Rohlfing, K., &#38; Stenneken, P. (2010). Children’s lexical
    skills and task demands affect gestural behavior in mothers of late-talking children
    and children with typical language development. <i>Gesture</i>, <i>10</i>(2),
    251–278. <a href="https://doi.org/10.1075/gest.10.2-3.07gri">https://doi.org/10.1075/gest.10.2-3.07gri</a>
  bibtex: '@article{Grimminger_Rohlfing_Stenneken_2010, title={Children’s lexical
    skills and task demands affect gestural behavior in mothers of late-talking children
    and children with typical language development}, volume={10}, DOI={<a href="https://doi.org/10.1075/gest.10.2-3.07gri">10.1075/gest.10.2-3.07gri</a>},
    number={2}, journal={Gesture}, publisher={John Benjamins Publishing Company},
    author={Grimminger, Angela and Rohlfing, Katharina and Stenneken, Prisca}, year={2010},
    pages={251–278} }'
  chicago: 'Grimminger, Angela, Katharina Rohlfing, and Prisca Stenneken. “Children’s
    Lexical Skills and Task Demands Affect Gestural Behavior in Mothers of Late-Talking
    Children and Children with Typical Language Development.” <i>Gesture</i> 10, no.
    2 (2010): 251–78. <a href="https://doi.org/10.1075/gest.10.2-3.07gri">https://doi.org/10.1075/gest.10.2-3.07gri</a>.'
  ieee: 'A. Grimminger, K. Rohlfing, and P. Stenneken, “Children’s lexical skills
    and task demands affect gestural behavior in mothers of late-talking children
    and children with typical language development,” <i>Gesture</i>, vol. 10, no.
    2, pp. 251–278, 2010, doi: <a href="https://doi.org/10.1075/gest.10.2-3.07gri">10.1075/gest.10.2-3.07gri</a>.'
  mla: Grimminger, Angela, et al. “Children’s Lexical Skills and Task Demands Affect
    Gestural Behavior in Mothers of Late-Talking Children and Children with Typical
    Language Development.” <i>Gesture</i>, vol. 10, no. 2, John Benjamins Publishing
    Company, 2010, pp. 251–78, doi:<a href="https://doi.org/10.1075/gest.10.2-3.07gri">10.1075/gest.10.2-3.07gri</a>.
  short: A. Grimminger, K. Rohlfing, P. Stenneken, Gesture 10 (2010) 251–278.
date_created: 2020-06-24T13:02:24Z
date_updated: 2023-02-01T13:01:23Z
department:
- _id: '749'
doi: 10.1075/gest.10.2-3.07gri
intvolume: '        10'
issue: '2'
keyword:
- task-oriented dialogue
- Late Talker
- maternal multimodal input
- gestural motherese
language:
- iso: eng
page: 251-278
publication: Gesture
publication_identifier:
  issn:
  - 1569-9773
publisher: John Benjamins Publishing Company
status: public
title: Children's lexical skills and task demands affect gestural behavior in mothers
  of late-talking children and children with typical language development
type: journal_article
user_id: '14931'
volume: 10
year: '2010'
...
---
_id: '17259'
abstract:
- lang: eng
  text: Learning is a social endeavor, in which the learner generally receives support
    from his/her social partner(s). In developmental research – even though tutors/adults
    behavior modifications in their speech, gestures and motions have been extensively
    studied, studies barely consider the recipient’s (i.e. the child’s) perspective
    in the analysis of the adult’s presentation, In addition, the variability in parental
    behavior, i.e. the fact that not every parent modifies her/his behavior in the
    same way, found less fine-grained analysis. In contrast, in this paper, we assume
    an interactional perspective investigating the loop between the tutor’s and the
    learner’s actions. With this approach, we aim both at discovering the levels and
    features of variability and at achieving a better understanding of how they come
    about within the course of the interaction. For our analysis, we used a combination
    of (1) qualitative investigation derived from ethnomethodological Conversation
    Analysis (CA), (2) semi-automatic computational 2D hand tracking and (3) a mathematically
    based visualization of the data. Our analysis reveals that tutors not only shape
    their demonstrations differently with regard to the intended recipient per se
    (adult-directed vs. child-directed), but most importantly that the learner’s feedback
    during the presentation is consequential for the concrete ways in which the presentation
    is carried out.
author:
- first_name: Karola
  full_name: Pitsch, Karola
  last_name: Pitsch
- first_name: Anna-Lisa
  full_name: Vollmer, Anna-Lisa
  last_name: Vollmer
- first_name: Jannik
  full_name: Fritsch, Jannik
  last_name: Fritsch
- first_name: Britta
  full_name: Wrede, Britta
  last_name: Wrede
- first_name: Katharina
  full_name: Rohlfing, Katharina
  id: '50352'
  last_name: Rohlfing
- first_name: Gerhard
  full_name: Sagerer, Gerhard
  last_name: Sagerer
citation:
  ama: 'Pitsch K, Vollmer A-L, Fritsch J, Wrede B, Rohlfing K, Sagerer G. On the loop
    of action modification and the recipient’s gaze in adult-child interaction. In:
    <i>Gesture and Speech in Interaction</i>. ; 2009.'
  apa: Pitsch, K., Vollmer, A.-L., Fritsch, J., Wrede, B., Rohlfing, K., &#38; Sagerer,
    G. (2009). On the loop of action modification and the recipient’s gaze in adult-child
    interaction. <i>Gesture and Speech in Interaction</i>.
  bibtex: '@inproceedings{Pitsch_Vollmer_Fritsch_Wrede_Rohlfing_Sagerer_2009, title={On
    the loop of action modification and the recipient’s gaze in adult-child interaction},
    booktitle={Gesture and Speech in Interaction}, author={Pitsch, Karola and Vollmer,
    Anna-Lisa and Fritsch, Jannik and Wrede, Britta and Rohlfing, Katharina and Sagerer,
    Gerhard}, year={2009} }'
  chicago: Pitsch, Karola, Anna-Lisa Vollmer, Jannik Fritsch, Britta Wrede, Katharina
    Rohlfing, and Gerhard Sagerer. “On the Loop of Action Modification and the Recipient’s
    Gaze in Adult-Child Interaction.” In <i>Gesture and Speech in Interaction</i>,
    2009.
  ieee: K. Pitsch, A.-L. Vollmer, J. Fritsch, B. Wrede, K. Rohlfing, and G. Sagerer,
    “On the loop of action modification and the recipient’s gaze in adult-child interaction,”
    2009.
  mla: Pitsch, Karola, et al. “On the Loop of Action Modification and the Recipient’s
    Gaze in Adult-Child Interaction.” <i>Gesture and Speech in Interaction</i>, 2009.
  short: 'K. Pitsch, A.-L. Vollmer, J. Fritsch, B. Wrede, K. Rohlfing, G. Sagerer,
    in: Gesture and Speech in Interaction, 2009.'
date_created: 2020-06-24T13:02:27Z
date_updated: 2023-02-01T13:02:31Z
department:
- _id: '749'
keyword:
- gaze
- gesture
- Multimodal
- adult-child interaction
language:
- iso: eng
publication: Gesture and Speech in Interaction
status: public
title: On the loop of action modification and the recipient's gaze in adult-child
  interaction
type: conference
user_id: '14931'
year: '2009'
...
---
_id: '17272'
abstract:
- lang: eng
  text: In developmental research, tutoring behavior has been identified as scaffolding
    infants' learning processes. It has been defined in terms of child-directed speech
    (Motherese), child-directed motion (Motionese), and contingency. In the field
    of developmental robotics, research often assumes that in human-robot interaction
    (HRI), robots are treated similar to infants, because their immature cognitive
    capabilities benefit from this behavior. However, according to our knowledge,
    it has barely been studied whether this is true and how exactly humans alter their
    behavior towards a robotic interaction partner. In this paper, we present results
    concerning the acceptance of a robotic agent in a social learning scenario obtained
    via comparison to adults and 8-11 months old infants in equal conditions. These
    results constitute an important empirical basis for making use of tutoring behavior
    in social robotics. In our study, we performed a detailed multimodal analysis
    of HRI in a tutoring situation using the example of a robot simulation equipped
    with a bottom-up saliency-based attention model. Our results reveal significant
    differences in hand movement velocity, motion pauses, range of motion, and eye
    gaze suggesting that for example adults decrease their hand movement velocity
    in an Adult-Child Interaction (ACI), opposed to an Adult-Adult Interaction (AAI)
    and this decrease is even higher in the Adult-Robot Interaction (ARI). We also
    found important differences between ACI and ARI in how the behavior is modified
    over time as the interaction unfolds. These findings indicate the necessity of
    integrating top-down feedback structures into a bottom-up system for robots to
    be fully accepted as interaction partners.
author:
- first_name: Anna-Lisa
  full_name: Vollmer, Anna-Lisa
  last_name: Vollmer
- first_name: Katrin Solveig
  full_name: Lohan, Katrin Solveig
  last_name: Lohan
- first_name: Kerstin
  full_name: Fischer, Kerstin
  last_name: Fischer
- first_name: Yukie
  full_name: Nagai, Yukie
  last_name: Nagai
- first_name: Karola
  full_name: Pitsch, Karola
  last_name: Pitsch
- first_name: Jannik
  full_name: Fritsch, Jannik
  last_name: Fritsch
- first_name: Katharina
  full_name: Rohlfing, Katharina
  id: '50352'
  last_name: Rohlfing
- first_name: Britta
  full_name: Wrede, Britta
  last_name: Wrede
citation:
  ama: 'Vollmer A-L, Lohan KS, Fischer K, et al. People modify their tutoring behavior
    in robot-directed interaction for action learning. In: <i>Development and Learning,
    2009. ICDL 2009. IEEE 8th International Conference on Development and Learning</i>.
    IEEE; 2009:1-6. doi:<a href="https://doi.org/10.1109/DEVLRN.2009.5175516">10.1109/DEVLRN.2009.5175516</a>'
  apa: Vollmer, A.-L., Lohan, K. S., Fischer, K., Nagai, Y., Pitsch, K., Fritsch,
    J., Rohlfing, K., &#38; Wrede, B. (2009). People modify their tutoring behavior
    in robot-directed interaction for action learning. <i>Development and Learning,
    2009. ICDL 2009. IEEE 8th International Conference on Development and Learning</i>,
    1–6. <a href="https://doi.org/10.1109/DEVLRN.2009.5175516">https://doi.org/10.1109/DEVLRN.2009.5175516</a>
  bibtex: '@inproceedings{Vollmer_Lohan_Fischer_Nagai_Pitsch_Fritsch_Rohlfing_Wrede_2009,
    title={People modify their tutoring behavior in robot-directed interaction for
    action learning}, DOI={<a href="https://doi.org/10.1109/DEVLRN.2009.5175516">10.1109/DEVLRN.2009.5175516</a>},
    booktitle={Development and Learning, 2009. ICDL 2009. IEEE 8th International Conference
    on Development and Learning}, publisher={IEEE}, author={Vollmer, Anna-Lisa and
    Lohan, Katrin Solveig and Fischer, Kerstin and Nagai, Yukie and Pitsch, Karola
    and Fritsch, Jannik and Rohlfing, Katharina and Wrede, Britta}, year={2009}, pages={1–6}
    }'
  chicago: Vollmer, Anna-Lisa, Katrin Solveig Lohan, Kerstin Fischer, Yukie Nagai,
    Karola Pitsch, Jannik Fritsch, Katharina Rohlfing, and Britta Wrede. “People Modify
    Their Tutoring Behavior in Robot-Directed Interaction for Action Learning.” In
    <i>Development and Learning, 2009. ICDL 2009. IEEE 8th International Conference
    on Development and Learning</i>, 1–6. IEEE, 2009. <a href="https://doi.org/10.1109/DEVLRN.2009.5175516">https://doi.org/10.1109/DEVLRN.2009.5175516</a>.
  ieee: 'A.-L. Vollmer <i>et al.</i>, “People modify their tutoring behavior in robot-directed
    interaction for action learning,” in <i>Development and Learning, 2009. ICDL 2009.
    IEEE 8th International Conference on Development and Learning</i>, 2009, pp. 1–6,
    doi: <a href="https://doi.org/10.1109/DEVLRN.2009.5175516">10.1109/DEVLRN.2009.5175516</a>.'
  mla: Vollmer, Anna-Lisa, et al. “People Modify Their Tutoring Behavior in Robot-Directed
    Interaction for Action Learning.” <i>Development and Learning, 2009. ICDL 2009.
    IEEE 8th International Conference on Development and Learning</i>, IEEE, 2009,
    pp. 1–6, doi:<a href="https://doi.org/10.1109/DEVLRN.2009.5175516">10.1109/DEVLRN.2009.5175516</a>.
  short: 'A.-L. Vollmer, K.S. Lohan, K. Fischer, Y. Nagai, K. Pitsch, J. Fritsch,
    K. Rohlfing, B. Wrede, in: Development and Learning, 2009. ICDL 2009. IEEE 8th
    International Conference on Development and Learning, IEEE, 2009, pp. 1–6.'
date_created: 2020-06-24T13:02:43Z
date_updated: 2023-02-01T13:06:43Z
department:
- _id: '749'
doi: 10.1109/DEVLRN.2009.5175516
keyword:
- robot simulation
- hand movement velocity
- robotic interaction partner
- robotic agent
- robot-directed interaction
- multimodal analysis
- Motionese
- Motherese
- intelligent tutoring systems
- immature cognitive capability
- human computer interaction
- eye gaze
- child-directed speech
- child-directed motion
- bottom-up system
- bottom-up saliency-based attention model
- adult-robot interaction
- adult-child interaction
- adult-adult interaction
- human-robot interaction
- action learning
- social learning scenario
- social robotics
- software agents
- top-down feedback structures
- tutoring behavior
language:
- iso: eng
page: 1-6
publication: Development and Learning, 2009. ICDL 2009. IEEE 8th International Conference
  on Development and Learning
publisher: IEEE
status: public
title: People modify their tutoring behavior in robot-directed interaction for action
  learning
type: conference
user_id: '14931'
year: '2009'
...
---
_id: '17268'
author:
- first_name: Lars
  full_name: Schillingmann, Lars
  last_name: Schillingmann
- first_name: Britta
  full_name: Wrede, Britta
  last_name: Wrede
- first_name: Katharina
  full_name: Rohlfing, Katharina
  id: '50352'
  last_name: Rohlfing
citation:
  ama: 'Schillingmann L, Wrede B, Rohlfing K. Towards a Computational Model of Acoustic
    Packaging. In: <i>International Conference on Development and Learning (ICDL 2009)</i>.
    IEEE; 2009. doi:<a href="https://doi.org/10.1109/devlrn.2009.5175523">10.1109/devlrn.2009.5175523</a>'
  apa: Schillingmann, L., Wrede, B., &#38; Rohlfing, K. (2009). Towards a Computational
    Model of Acoustic Packaging. <i>International Conference on Development and Learning
    (ICDL 2009)</i>. <a href="https://doi.org/10.1109/devlrn.2009.5175523">https://doi.org/10.1109/devlrn.2009.5175523</a>
  bibtex: '@inproceedings{Schillingmann_Wrede_Rohlfing_2009, title={Towards a Computational
    Model of Acoustic Packaging}, DOI={<a href="https://doi.org/10.1109/devlrn.2009.5175523">10.1109/devlrn.2009.5175523</a>},
    booktitle={International Conference on Development and Learning (ICDL 2009)},
    publisher={IEEE}, author={Schillingmann, Lars and Wrede, Britta and Rohlfing,
    Katharina}, year={2009} }'
  chicago: Schillingmann, Lars, Britta Wrede, and Katharina Rohlfing. “Towards a Computational
    Model of Acoustic Packaging.” In <i>International Conference on Development and
    Learning (ICDL 2009)</i>. IEEE, 2009. <a href="https://doi.org/10.1109/devlrn.2009.5175523">https://doi.org/10.1109/devlrn.2009.5175523</a>.
  ieee: 'L. Schillingmann, B. Wrede, and K. Rohlfing, “Towards a Computational Model
    of Acoustic Packaging,” 2009, doi: <a href="https://doi.org/10.1109/devlrn.2009.5175523">10.1109/devlrn.2009.5175523</a>.'
  mla: Schillingmann, Lars, et al. “Towards a Computational Model of Acoustic Packaging.”
    <i>International Conference on Development and Learning (ICDL 2009)</i>, IEEE,
    2009, doi:<a href="https://doi.org/10.1109/devlrn.2009.5175523">10.1109/devlrn.2009.5175523</a>.
  short: 'L. Schillingmann, B. Wrede, K. Rohlfing, in: International Conference on
    Development and Learning (ICDL 2009), IEEE, 2009.'
date_created: 2020-06-24T13:02:38Z
date_updated: 2023-02-01T13:05:32Z
department:
- _id: '749'
doi: 10.1109/devlrn.2009.5175523
keyword:
- Acoustic Packaging
- multimodal
language:
- iso: eng
publication: International Conference on Development and Learning (ICDL 2009)
publication_identifier:
  isbn:
  - 978-1-4244-4117-4
publisher: IEEE
status: public
title: Towards a Computational Model of Acoustic Packaging
type: conference
user_id: '14931'
year: '2009'
...
---
_id: '38543'
abstract:
- lang: eng
  text: Today a large variety of mobile interaction devices such as PDAs and mobile
    phones enforce the development of a wide range of user interfaces for each platform.
    The complexity even grows, when multiple interaction devices are used to perform
    the same task and when different modalities have to be supported. We introduce
    a new dialog model for the abstraction of concrete user interfaces with a separate
    advanced control layer for the integration of different modalities. In this context,
    we present the Dialog and Interface Specification Language (DISL), which comes
    with a proof-of-concept implementation.
author:
- first_name: Robbie
  full_name: Schäfer, Robbie
  last_name: Schäfer
- first_name: Steffen
  full_name: Bleul, Steffen
  last_name: Bleul
- first_name: Wolfgang
  full_name: Müller, Wolfgang
  id: '16243'
  last_name: Müller
citation:
  ama: 'Schäfer R, Bleul S, Müller W. Dialog Modelling for Multiple Devices and Multiple
    Interaction Modalities. In: <i>Proceedings of the 5th International Workshop on
    Task Models and Diagrams for User Interface Design (TAMODIA’2006)</i>. ; 2006.
    doi:<a href="https://doi.org/10.1007/978-3-540-70816-2_4">10.1007/978-3-540-70816-2_4</a>'
  apa: Schäfer, R., Bleul, S., &#38; Müller, W. (2006). Dialog Modelling for Multiple
    Devices and Multiple Interaction Modalities. <i>Proceedings of the 5th International
    Workshop on Task Models and Diagrams for User Interface Design (TAMODIA’2006)</i>.
    <a href="https://doi.org/10.1007/978-3-540-70816-2_4">https://doi.org/10.1007/978-3-540-70816-2_4</a>
  bibtex: '@inproceedings{Schäfer_Bleul_Müller_2006, place={Hasselt, Belgium}, title={Dialog
    Modelling for Multiple Devices and Multiple Interaction Modalities}, DOI={<a href="https://doi.org/10.1007/978-3-540-70816-2_4">10.1007/978-3-540-70816-2_4</a>},
    booktitle={Proceedings of the 5th International Workshop on Task Models and Diagrams
    for User Interface Design (TAMODIA’2006)}, author={Schäfer, Robbie and Bleul,
    Steffen and Müller, Wolfgang}, year={2006} }'
  chicago: Schäfer, Robbie, Steffen Bleul, and Wolfgang Müller. “Dialog Modelling
    for Multiple Devices and Multiple Interaction Modalities.” In <i>Proceedings of
    the 5th International Workshop on Task Models and Diagrams for User Interface
    Design (TAMODIA’2006)</i>. Hasselt, Belgium, 2006. <a href="https://doi.org/10.1007/978-3-540-70816-2_4">https://doi.org/10.1007/978-3-540-70816-2_4</a>.
  ieee: 'R. Schäfer, S. Bleul, and W. Müller, “Dialog Modelling for Multiple Devices
    and Multiple Interaction Modalities,” 2006, doi: <a href="https://doi.org/10.1007/978-3-540-70816-2_4">10.1007/978-3-540-70816-2_4</a>.'
  mla: Schäfer, Robbie, et al. “Dialog Modelling for Multiple Devices and Multiple
    Interaction Modalities.” <i>Proceedings of the 5th International Workshop on Task
    Models and Diagrams for User Interface Design (TAMODIA’2006)</i>, 2006, doi:<a
    href="https://doi.org/10.1007/978-3-540-70816-2_4">10.1007/978-3-540-70816-2_4</a>.
  short: 'R. Schäfer, S. Bleul, W. Müller, in: Proceedings of the 5th International
    Workshop on Task Models and Diagrams for User Interface Design (TAMODIA’2006),
    Hasselt, Belgium, 2006.'
date_created: 2023-01-24T08:03:51Z
date_updated: 2023-01-24T08:03:56Z
department:
- _id: '672'
doi: 10.1007/978-3-540-70816-2_4
keyword:
- User Interface
- Interaction Manager
- Output Device
- Multimodal Interface
- Interaction Object
language:
- iso: eng
place: Hasselt, Belgium
publication: Proceedings of the 5th International Workshop on Task Models and Diagrams
  for User Interface Design (TAMODIA'2006)
publication_identifier:
  isbn:
  - 978-3-540-70815-5
status: public
title: Dialog Modelling for Multiple Devices and Multiple Interaction Modalities
type: conference
user_id: '5786'
year: '2006'
...
---
_id: '39350'
abstract:
- lang: eng
  text: Variation in different mobile devices with different capabilities and interaction
    modalities as well as changing user context in nomadic applications, poses huge
    challenges to the design of user interfaces. To avoid multiple designs for each
    device or modality, it is almost a must to employ a model-based approach. In this
    short paper, we present a new dialog model for multimodal interaction together
    with an advanced control model, which can either be used for direct modeling by
    an interface designer or in conjunction with higher level models.
author:
- first_name: Robbie
  full_name: Schäfer, Robbie
  last_name: Schäfer
- first_name: Steffen
  full_name: Bleul, Steffen
  last_name: Bleul
- first_name: Wolfgang
  full_name: Müller, Wolfgang
  id: '16243'
  last_name: Müller
citation:
  ama: 'Schäfer R, Bleul S, Müller W. A Novel Dialog Model for the Design of Multimodal
    User Interfaces. In: <i>Proceedings of EHCI-DSVIS 2005</i>. Lecture Notes in Computer
    Science . ; 2004.'
  apa: Schäfer, R., Bleul, S., &#38; Müller, W. (2004). A Novel Dialog Model for the
    Design of Multimodal User Interfaces. <i>Proceedings of EHCI-DSVIS 2005</i>.
  bibtex: '@inproceedings{Schäfer_Bleul_Müller_2004, place={Tremsbüttel, Hamburg},
    series={Lecture Notes in Computer Science }, title={A Novel Dialog Model for the
    Design of Multimodal User Interfaces}, booktitle={Proceedings of EHCI-DSVIS 2005},
    author={Schäfer, Robbie and Bleul, Steffen and Müller, Wolfgang}, year={2004},
    collection={Lecture Notes in Computer Science } }'
  chicago: Schäfer, Robbie, Steffen Bleul, and Wolfgang Müller. “A Novel Dialog Model
    for the Design of Multimodal User Interfaces.” In <i>Proceedings of EHCI-DSVIS
    2005</i>. Lecture Notes in Computer Science . Tremsbüttel, Hamburg, 2004.
  ieee: R. Schäfer, S. Bleul, and W. Müller, “A Novel Dialog Model for the Design
    of Multimodal User Interfaces,” 2004.
  mla: Schäfer, Robbie, et al. “A Novel Dialog Model for the Design of Multimodal
    User Interfaces.” <i>Proceedings of EHCI-DSVIS 2005</i>, 2004.
  short: 'R. Schäfer, S. Bleul, W. Müller, in: Proceedings of EHCI-DSVIS 2005, Tremsbüttel,
    Hamburg, 2004.'
date_created: 2023-01-24T09:26:58Z
date_updated: 2023-01-24T09:27:03Z
department:
- _id: '672'
keyword:
- Multimodal User Interface
- High Level Model
- Multimodal User
- High Level Approach
- Dialog Model
language:
- iso: eng
place: Tremsbüttel, Hamburg
publication: Proceedings of EHCI-DSVIS 2005
series_title: Lecture Notes in Computer Science
status: public
title: A Novel Dialog Model for the Design of Multimodal User Interfaces
type: conference
user_id: '5786'
year: '2004'
...
