[{"publication":"IEEE Transactions on Evolutionary Computation","type":"journal_article","status":"public","_id":"63053","department":[{"_id":"819"}],"user_id":"15504","keyword":["Optimization","Evolutionary computation","Hands","Proposals","Convergence","Computational efficiency","Artificial intelligence","Accuracy","Approximation algorithms","Aerospace electronics","Multi-objective optimization","evolutionary algorithms","nearly optimal solutions","multimodal optimization","archiving","continuation"],"language":[{"iso":"eng"}],"year":"2025","page":"1-1","citation":{"ama":"Hernández C, Rodriguez-Fernandez AE, Schäpermeier L, Cuate O, Trautmann H, Schütze O. An Evolutionary Approach for the Computation of ϵ-Locally Optimal Solutions for Multi-Objective Multimodal Optimization. <i>IEEE Transactions on Evolutionary Computation</i>. Published online 2025:1-1. doi:<a href=\"https://doi.org/10.1109/TEVC.2025.3637276\">10.1109/TEVC.2025.3637276</a>","ieee":"C. Hernández, A. E. Rodriguez-Fernandez, L. Schäpermeier, O. Cuate, H. Trautmann, and O. Schütze, “An Evolutionary Approach for the Computation of ϵ-Locally Optimal Solutions for Multi-Objective Multimodal Optimization,” <i>IEEE Transactions on Evolutionary Computation</i>, pp. 1–1, 2025, doi: <a href=\"https://doi.org/10.1109/TEVC.2025.3637276\">10.1109/TEVC.2025.3637276</a>.","chicago":"Hernández, Carlos, Angel E. Rodriguez-Fernandez, Lennart Schäpermeier, Oliver Cuate, Heike Trautmann, and Oliver Schütze. “An Evolutionary Approach for the Computation of ϵ-Locally Optimal Solutions for Multi-Objective Multimodal Optimization.” <i>IEEE Transactions on Evolutionary Computation</i>, 2025, 1–1. <a href=\"https://doi.org/10.1109/TEVC.2025.3637276\">https://doi.org/10.1109/TEVC.2025.3637276</a>.","mla":"Hernández, Carlos, et al. “An Evolutionary Approach for the Computation of ϵ-Locally Optimal Solutions for Multi-Objective Multimodal Optimization.” <i>IEEE Transactions on Evolutionary Computation</i>, 2025, pp. 
1–1, doi:<a href=\"https://doi.org/10.1109/TEVC.2025.3637276\">10.1109/TEVC.2025.3637276</a>.","short":"C. Hernández, A.E. Rodriguez-Fernandez, L. Schäpermeier, O. Cuate, H. Trautmann, O. Schütze, IEEE Transactions on Evolutionary Computation (2025) 1–1.","bibtex":"@article{Hernández_Rodriguez-Fernandez_Schäpermeier_Cuate_Trautmann_Schütze_2025, title={An Evolutionary Approach for the Computation of ϵ-Locally Optimal Solutions for Multi-Objective Multimodal Optimization}, DOI={<a href=\"https://doi.org/10.1109/TEVC.2025.3637276\">10.1109/TEVC.2025.3637276</a>}, journal={IEEE Transactions on Evolutionary Computation}, author={Hernández, Carlos and Rodriguez-Fernandez, Angel E. and Schäpermeier, Lennart and Cuate, Oliver and Trautmann, Heike and Schütze, Oliver}, year={2025}, pages={1–1} }","apa":"Hernández, C., Rodriguez-Fernandez, A. E., Schäpermeier, L., Cuate, O., Trautmann, H., &#38; Schütze, O. (2025). An Evolutionary Approach for the Computation of ϵ-Locally Optimal Solutions for Multi-Objective Multimodal Optimization. <i>IEEE Transactions on Evolutionary Computation</i>, 1–1. 
<a href=\"https://doi.org/10.1109/TEVC.2025.3637276\">https://doi.org/10.1109/TEVC.2025.3637276</a>"},"date_updated":"2025-12-12T06:13:51Z","date_created":"2025-12-12T06:13:06Z","author":[{"last_name":"Hernández","full_name":"Hernández, Carlos","first_name":"Carlos"},{"first_name":"Angel E.","last_name":"Rodriguez-Fernandez","full_name":"Rodriguez-Fernandez, Angel E."},{"last_name":"Schäpermeier","full_name":"Schäpermeier, Lennart","first_name":"Lennart"},{"last_name":"Cuate","full_name":"Cuate, Oliver","first_name":"Oliver"},{"first_name":"Heike","orcid":"0000-0002-9788-8282","last_name":"Trautmann","id":"100740","full_name":"Trautmann, Heike"},{"full_name":"Schütze, Oliver","last_name":"Schütze","first_name":"Oliver"}],"title":"An Evolutionary Approach for the Computation of ϵ-Locally Optimal Solutions for Multi-Objective Multimodal Optimization","doi":"10.1109/TEVC.2025.3637276"},{"doi":"10.1109/TEVC.2024.3458855","title":"Finding ϵ-Locally Optimal Solutions for Multi-Objective Multimodal Optimization","date_created":"2024-09-24T08:01:14Z","author":[{"first_name":"Angel E.","last_name":"Rodriguez-Fernandez","full_name":"Rodriguez-Fernandez, Angel E."},{"full_name":"Schäpermeier, Lennart","last_name":"Schäpermeier","first_name":"Lennart"},{"first_name":"Carlos","full_name":"Hernández, Carlos","last_name":"Hernández"},{"first_name":"Pascal","last_name":"Kerschke","full_name":"Kerschke, Pascal"},{"last_name":"Trautmann","orcid":"0000-0002-9788-8282","full_name":"Trautmann, Heike","id":"100740","first_name":"Heike"},{"full_name":"Schütze, Oliver","last_name":"Schütze","first_name":"Oliver"}],"date_updated":"2024-09-24T08:01:47Z","page":"1-1","citation":{"ieee":"A. E. Rodriguez-Fernandez, L. Schäpermeier, C. Hernández, P. Kerschke, H. Trautmann, and O. Schütze, “Finding ϵ-Locally Optimal Solutions for Multi-Objective Multimodal Optimization,” <i>IEEE Transactions on Evolutionary Computation</i>, pp. 
1–1, 2024, doi: <a href=\"https://doi.org/10.1109/TEVC.2024.3458855\">10.1109/TEVC.2024.3458855</a>.","chicago":"Rodriguez-Fernandez, Angel E., Lennart Schäpermeier, Carlos Hernández, Pascal Kerschke, Heike Trautmann, and Oliver Schütze. “Finding ϵ-Locally Optimal Solutions for Multi-Objective Multimodal Optimization.” <i>IEEE Transactions on Evolutionary Computation</i>, 2024, 1–1. <a href=\"https://doi.org/10.1109/TEVC.2024.3458855\">https://doi.org/10.1109/TEVC.2024.3458855</a>.","ama":"Rodriguez-Fernandez AE, Schäpermeier L, Hernández C, Kerschke P, Trautmann H, Schütze O. Finding ϵ-Locally Optimal Solutions for Multi-Objective Multimodal Optimization. <i>IEEE Transactions on Evolutionary Computation</i>. Published online 2024:1-1. doi:<a href=\"https://doi.org/10.1109/TEVC.2024.3458855\">10.1109/TEVC.2024.3458855</a>","apa":"Rodriguez-Fernandez, A. E., Schäpermeier, L., Hernández, C., Kerschke, P., Trautmann, H., &#38; Schütze, O. (2024). Finding ϵ-Locally Optimal Solutions for Multi-Objective Multimodal Optimization. <i>IEEE Transactions on Evolutionary Computation</i>, 1–1. <a href=\"https://doi.org/10.1109/TEVC.2024.3458855\">https://doi.org/10.1109/TEVC.2024.3458855</a>","mla":"Rodriguez-Fernandez, Angel E., et al. “Finding ϵ-Locally Optimal Solutions for Multi-Objective Multimodal Optimization.” <i>IEEE Transactions on Evolutionary Computation</i>, 2024, pp. 1–1, doi:<a href=\"https://doi.org/10.1109/TEVC.2024.3458855\">10.1109/TEVC.2024.3458855</a>.","short":"A.E. Rodriguez-Fernandez, L. Schäpermeier, C. Hernández, P. Kerschke, H. Trautmann, O. 
Schütze, IEEE Transactions on Evolutionary Computation (2024) 1–1.","bibtex":"@article{Rodriguez-Fernandez_Schäpermeier_Hernández_Kerschke_Trautmann_Schütze_2024, title={Finding ϵ-Locally Optimal Solutions for Multi-Objective Multimodal Optimization}, DOI={<a href=\"https://doi.org/10.1109/TEVC.2024.3458855\">10.1109/TEVC.2024.3458855</a>}, journal={IEEE Transactions on Evolutionary Computation}, author={Rodriguez-Fernandez, Angel E. and Schäpermeier, Lennart and Hernández, Carlos and Kerschke, Pascal and Trautmann, Heike and Schütze, Oliver}, year={2024}, pages={1–1} }"},"year":"2024","language":[{"iso":"eng"}],"keyword":["Optimization","Evolutionary computation","Approximation algorithms","Benchmark testing","Vectors","Surveys","Pareto optimization","multi-objective optimization","evolutionary computation","multimodal optimization","local solutions"],"user_id":"15504","_id":"56221","status":"public","publication":"IEEE Transactions on Evolutionary Computation","type":"journal_article"},{"language":[{"iso":"eng"}],"keyword":["explanations","multimodal behaviour","elaborations","conditional probabilities"],"abstract":[{"text":"Everyday explanations are interactive processes with the aim to provide a less knowledgeable person with reasonable information about other people, objects, or events. Because explanations are interactive communicative processes, the topical structure of an explanation may vary dynamically depending on the immediate feedback of the explainee. In this paper, we analyse topical transitions in medical explanations organised by different physicians (explainers) related to different forms of multimodal behaviour of caregivers (explainees) attending an explanation about the procedures of\r\nan upcoming surgery of a child. 
The analyses reveal that explainees’ multimodal behaviour with gaze shifts (and particularly gaze aversion) can predict a transition from an elaborated topic to a new one, whereas explainees’ forms of multimodal behaviour with static gaze cannot be related to changes of the topical structure.","lang":"eng"}],"publication":"Interaction Studies","title":"Changes in the topical structure of explanations are related to explainees’ multimodal behaviour","date_created":"2025-05-14T06:25:13Z","publisher":"John Benjamins","year":"2024","issue":"3","article_type":"original","user_id":"90345","_id":"59888","project":[{"_id":"112","name":"TRR 318 - A02: TRR 318 - Verstehensprozess einer Erklärung beobachten und auswerten (Teilprojekt A02)","grant_number":"438445824"}],"status":"public","type":"journal_article","doi":"10.1075/is.23033.laz","main_file_link":[{"url":"https://www.jbe-platform.com/content/journals/10.1075/is.23033.laz#metrics_content","open_access":"1"}],"volume":25,"author":[{"id":"90345","full_name":"Lazarov, Stefan Teodorov","orcid":"0009-0009-0892-9483","last_name":"Lazarov","first_name":"Stefan Teodorov"},{"first_name":"Kai","full_name":"Biermeier, Kai","id":"55908","orcid":"0000-0002-2879-2359","last_name":"Biermeier"},{"first_name":"Angela","last_name":"Grimminger","id":"57578","full_name":"Grimminger, Angela"}],"date_updated":"2025-06-27T13:57:36Z","oa":"1","intvolume":"        25","page":"257 - 280","citation":{"apa":"Lazarov, S. T., Biermeier, K., &#38; Grimminger, A. (2024). Changes in the topical structure of explanations are related to explainees’ multimodal behaviour. <i>Interaction Studies</i>, <i>25</i>(3), 257–280. <a href=\"https://doi.org/10.1075/is.23033.laz\">https://doi.org/10.1075/is.23033.laz</a>","mla":"Lazarov, Stefan Teodorov, et al. “Changes in the Topical Structure of Explanations Are Related to Explainees’ Multimodal Behaviour.” <i>Interaction Studies</i>, vol. 25, no. 3, John Benjamins, 2024, pp. 
257–80, doi:<a href=\"https://doi.org/10.1075/is.23033.laz\">10.1075/is.23033.laz</a>.","short":"S.T. Lazarov, K. Biermeier, A. Grimminger, Interaction Studies 25 (2024) 257–280.","bibtex":"@article{Lazarov_Biermeier_Grimminger_2024, title={Changes in the topical structure of explanations are related to explainees’ multimodal behaviour}, volume={25}, DOI={<a href=\"https://doi.org/10.1075/is.23033.laz\">10.1075/is.23033.laz</a>}, number={3}, journal={Interaction Studies}, publisher={John Benjamins}, author={Lazarov, Stefan Teodorov and Biermeier, Kai and Grimminger, Angela}, year={2024}, pages={257–280} }","ama":"Lazarov ST, Biermeier K, Grimminger A. Changes in the topical structure of explanations are related to explainees’ multimodal behaviour. <i>Interaction Studies</i>. 2024;25(3):257-280. doi:<a href=\"https://doi.org/10.1075/is.23033.laz\">10.1075/is.23033.laz</a>","chicago":"Lazarov, Stefan Teodorov, Kai Biermeier, and Angela Grimminger. “Changes in the Topical Structure of Explanations Are Related to Explainees’ Multimodal Behaviour.” <i>Interaction Studies</i> 25, no. 3 (2024): 257–80. <a href=\"https://doi.org/10.1075/is.23033.laz\">https://doi.org/10.1075/is.23033.laz</a>.","ieee":"S. T. Lazarov, K. Biermeier, and A. Grimminger, “Changes in the topical structure of explanations are related to explainees’ multimodal behaviour,” <i>Interaction Studies</i>, vol. 25, no. 3, pp. 
257–280, 2024, doi: <a href=\"https://doi.org/10.1075/is.23033.laz\">10.1075/is.23033.laz</a>."},"publication_identifier":{"eissn":["1572-0381"]},"publication_status":"published"},{"user_id":"15504","department":[{"_id":"34"},{"_id":"819"}],"_id":"46318","language":[{"iso":"eng"}],"keyword":["Multimodal optimization","Multi-objective continuous optimization","Landscape analysis","Visualization","Benchmarking","Theory","Algorithms"],"type":"journal_article","publication":"Computers & Operations Research","status":"public","abstract":[{"text":"Multi-objective (MO) optimization, i.e., the simultaneous optimization of multiple conflicting objectives, is gaining more and more attention in various research areas, such as evolutionary computation, machine learning (e.g., (hyper-)parameter optimization), or logistics (e.g., vehicle routing). Many works in this domain mention the structural problem property of multimodality as a challenge from two classical perspectives: (1) finding all globally optimal solution sets, and (2) avoiding to get trapped in local optima. Interestingly, these streams seem to transfer many traditional concepts of single-objective (SO) optimization into claims, assumptions, or even terminology regarding the MO domain, but mostly neglect the understanding of the structural properties as well as the algorithmic search behavior on a problem’s landscape. However, some recent works counteract this trend, by investigating the fundamentals and characteristics of MO problems using new visualization techniques and gaining surprising insights. Using these visual insights, this work proposes a step towards a unified terminology to capture multimodality and locality in a broader way than it is usually done. 
This enables us to investigate current research activities in multimodal continuous MO optimization and to highlight new implications and promising research directions for the design of benchmark suites, the discovery of MO landscape features, the development of new MO (or even SO) optimization algorithms, and performance indicators. For all these topics, we provide a review of ideas and methods but also an outlook on future challenges, research potential and perspectives that result from recent developments.","lang":"eng"}],"date_created":"2023-08-04T07:28:34Z","author":[{"first_name":"Christian","last_name":"Grimme","full_name":"Grimme, Christian"},{"first_name":"Pascal","full_name":"Kerschke, Pascal","last_name":"Kerschke"},{"full_name":"Aspar, Pelin","last_name":"Aspar","first_name":"Pelin"},{"first_name":"Heike","full_name":"Trautmann, Heike","id":"100740","last_name":"Trautmann","orcid":"0000-0002-9788-8282"},{"last_name":"Preuss","full_name":"Preuss, Mike","first_name":"Mike"},{"first_name":"André H.","full_name":"Deutz, André H.","last_name":"Deutz"},{"full_name":"Wang, Hao","last_name":"Wang","first_name":"Hao"},{"full_name":"Emmerich, Michael","last_name":"Emmerich","first_name":"Michael"}],"volume":136,"date_updated":"2023-10-16T12:58:42Z","doi":"10.1016/j.cor.2021.105489","title":"Peeking beyond peaks: Challenges and research potentials of continuous multimodal multi-objective optimization","publication_identifier":{"issn":["0305-0548"]},"citation":{"ieee":"C. Grimme <i>et al.</i>, “Peeking beyond peaks: Challenges and research potentials of continuous multimodal multi-objective optimization,” <i>Computers &#38; Operations Research</i>, vol. 136, p. 105489, 2021, doi: <a href=\"https://doi.org/10.1016/j.cor.2021.105489\">https://doi.org/10.1016/j.cor.2021.105489</a>.","chicago":"Grimme, Christian, Pascal Kerschke, Pelin Aspar, Heike Trautmann, Mike Preuss, André H. Deutz, Hao Wang, and Michael Emmerich. 
“Peeking beyond Peaks: Challenges and Research Potentials of Continuous Multimodal Multi-Objective Optimization.” <i>Computers &#38; Operations Research</i> 136 (2021): 105489. <a href=\"https://doi.org/10.1016/j.cor.2021.105489\">https://doi.org/10.1016/j.cor.2021.105489</a>.","ama":"Grimme C, Kerschke P, Aspar P, et al. Peeking beyond peaks: Challenges and research potentials of continuous multimodal multi-objective optimization. <i>Computers &#38; Operations Research</i>. 2021;136:105489. doi:<a href=\"https://doi.org/10.1016/j.cor.2021.105489\">https://doi.org/10.1016/j.cor.2021.105489</a>","bibtex":"@article{Grimme_Kerschke_Aspar_Trautmann_Preuss_Deutz_Wang_Emmerich_2021, title={Peeking beyond peaks: Challenges and research potentials of continuous multimodal multi-objective optimization}, volume={136}, DOI={<a href=\"https://doi.org/10.1016/j.cor.2021.105489\">https://doi.org/10.1016/j.cor.2021.105489</a>}, journal={Computers &#38; Operations Research}, author={Grimme, Christian and Kerschke, Pascal and Aspar, Pelin and Trautmann, Heike and Preuss, Mike and Deutz, André H. and Wang, Hao and Emmerich, Michael}, year={2021}, pages={105489} }","mla":"Grimme, Christian, et al. “Peeking beyond Peaks: Challenges and Research Potentials of Continuous Multimodal Multi-Objective Optimization.” <i>Computers &#38; Operations Research</i>, vol. 136, 2021, p. 105489, doi:<a href=\"https://doi.org/10.1016/j.cor.2021.105489\">https://doi.org/10.1016/j.cor.2021.105489</a>.","short":"C. Grimme, P. Kerschke, P. Aspar, H. Trautmann, M. Preuss, A.H. Deutz, H. Wang, M. Emmerich, Computers &#38; Operations Research 136 (2021) 105489.","apa":"Grimme, C., Kerschke, P., Aspar, P., Trautmann, H., Preuss, M., Deutz, A. H., Wang, H., &#38; Emmerich, M. (2021). Peeking beyond peaks: Challenges and research potentials of continuous multimodal multi-objective optimization. <i>Computers &#38; Operations Research</i>, <i>136</i>, 105489. 
<a href=\"https://doi.org/10.1016/j.cor.2021.105489\">https://doi.org/10.1016/j.cor.2021.105489</a>"},"page":"105489","intvolume":"       136","year":"2021"},{"publication_identifier":{"issn":["1572-0381"]},"issue":"2","year":"2013","intvolume":"        14","page":"240-267","citation":{"apa":"Nomikou, I., Rohlfing, K., &#38; Szufnarowska, J. (2013). Educating attention: recruiting, maintaining, and framing eye contact in early natural mother-infant interactions. <i>Interaction Studies</i>, <i>14</i>(2), 240–267. <a href=\"https://doi.org/10.1075/is.14.2.05nom\">https://doi.org/10.1075/is.14.2.05nom</a>","short":"I. Nomikou, K. Rohlfing, J. Szufnarowska, Interaction Studies 14 (2013) 240–267.","mla":"Nomikou, Iris, et al. “Educating Attention: Recruiting, Maintaining, and Framing Eye Contact in Early Natural Mother-Infant Interactions.” <i>Interaction Studies</i>, vol. 14, no. 2, John Benjamins Publishing Company, 2013, pp. 240–67, doi:<a href=\"https://doi.org/10.1075/is.14.2.05nom\">10.1075/is.14.2.05nom</a>.","bibtex":"@article{Nomikou_Rohlfing_Szufnarowska_2013, title={Educating attention: recruiting, maintaining, and framing eye contact in early natural mother-infant interactions}, volume={14}, DOI={<a href=\"https://doi.org/10.1075/is.14.2.05nom\">10.1075/is.14.2.05nom</a>}, number={2}, journal={Interaction Studies}, publisher={John Benjamins Publishing Company}, author={Nomikou, Iris and Rohlfing, Katharina and Szufnarowska, Joanna}, year={2013}, pages={240–267} }","chicago":"Nomikou, Iris, Katharina Rohlfing, and Joanna Szufnarowska. “Educating Attention: Recruiting, Maintaining, and Framing Eye Contact in Early Natural Mother-Infant Interactions.” <i>Interaction Studies</i> 14, no. 2 (2013): 240–67. <a href=\"https://doi.org/10.1075/is.14.2.05nom\">https://doi.org/10.1075/is.14.2.05nom</a>.","ieee":"I. Nomikou, K. Rohlfing, and J. 
Szufnarowska, “Educating attention: recruiting, maintaining, and framing eye contact in early natural mother-infant interactions,” <i>Interaction Studies</i>, vol. 14, no. 2, pp. 240–267, 2013, doi: <a href=\"https://doi.org/10.1075/is.14.2.05nom\">10.1075/is.14.2.05nom</a>.","ama":"Nomikou I, Rohlfing K, Szufnarowska J. Educating attention: recruiting, maintaining, and framing eye contact in early natural mother-infant interactions. <i>Interaction Studies</i>. 2013;14(2):240-267. doi:<a href=\"https://doi.org/10.1075/is.14.2.05nom\">10.1075/is.14.2.05nom</a>"},"publisher":"John Benjamins Publishing Company","date_updated":"2023-02-01T16:12:50Z","volume":14,"author":[{"first_name":"Iris","last_name":"Nomikou","full_name":"Nomikou, Iris"},{"first_name":"Katharina","last_name":"Rohlfing","full_name":"Rohlfing, Katharina","id":"50352"},{"first_name":"Joanna","last_name":"Szufnarowska","full_name":"Szufnarowska, Joanna"}],"date_created":"2020-06-24T13:01:23Z","title":"Educating attention: recruiting, maintaining, and framing eye contact in early natural mother-infant interactions","doi":"10.1075/is.14.2.05nom","publication":"Interaction Studies","type":"journal_article","abstract":[{"lang":"eng","text":"In a longitudinal naturalistic study, we observed German mothers interacting with their infants when they were 3 and 6 months old. Pursuing the idea that infants’ attention is socialized in everyday interactions, we explored whether eye contact is reinforced selectively by behavioral modification in the input provided to infants. Applying a microanalytical approach focusing on the sequential organization of interaction, we explored how the mother draws the infant’s attention to herself and how she tries to maintain attention when the infant is looking at her. Results showed that eye contact is reinforced by specific infant-directed practices: interrogatives and conversational openings, multimodal stimulation, repetition, and imitation. 
In addition, these practices are contingent on the infant’s own behavior. By comparing the two data points (3 and 6 months), we showed how the education of attention evolves hand-in-hand with the developing capacities of the infant."}],"status":"public","_id":"17204","department":[{"_id":"749"}],"user_id":"14931","keyword":["interactional adaptation","multimodal input","social learning","ecology of attention","eye contact"],"language":[{"iso":"eng"}]},{"doi":"10.1007/s12369-011-0124-9","title":"Generation and evaluation of communicative robot gesture","date_created":"2020-06-24T13:01:48Z","author":[{"first_name":"Maha","last_name":"Salem","full_name":"Salem, Maha"},{"first_name":"Stefan","last_name":"Kopp","full_name":"Kopp, Stefan"},{"first_name":"Ipke","full_name":"Wachsmuth, Ipke","last_name":"Wachsmuth"},{"first_name":"Katharina","last_name":"Rohlfing","full_name":"Rohlfing, Katharina","id":"50352"},{"full_name":"Joublin, Frank","last_name":"Joublin","first_name":"Frank"}],"volume":4,"date_updated":"2023-02-01T16:21:50Z","publisher":"Springer Science + Business Media","citation":{"ama":"Salem M, Kopp S, Wachsmuth I, Rohlfing K, Joublin F. Generation and evaluation of communicative robot gesture. <i>International Journal of Social Robotics, Special Issue on Expectations, Intentions, and Actions</i>. 2012;4(2):201-217. doi:<a href=\"https://doi.org/10.1007/s12369-011-0124-9\">10.1007/s12369-011-0124-9</a>","chicago":"Salem, Maha, Stefan Kopp, Ipke Wachsmuth, Katharina Rohlfing, and Frank Joublin. “Generation and Evaluation of Communicative Robot Gesture.” <i>International Journal of Social Robotics, Special Issue on Expectations, Intentions, and Actions</i> 4, no. 2 (2012): 201–17. <a href=\"https://doi.org/10.1007/s12369-011-0124-9\">https://doi.org/10.1007/s12369-011-0124-9</a>.","ieee":"M. Salem, S. Kopp, I. Wachsmuth, K. Rohlfing, and F. 
Joublin, “Generation and evaluation of communicative robot gesture,” <i>International Journal of Social Robotics, Special Issue on Expectations, Intentions, and Actions</i>, vol. 4, no. 2, pp. 201–217, 2012, doi: <a href=\"https://doi.org/10.1007/s12369-011-0124-9\">10.1007/s12369-011-0124-9</a>.","apa":"Salem, M., Kopp, S., Wachsmuth, I., Rohlfing, K., &#38; Joublin, F. (2012). Generation and evaluation of communicative robot gesture. <i>International Journal of Social Robotics, Special Issue on Expectations, Intentions, and Actions</i>, <i>4</i>(2), 201–217. <a href=\"https://doi.org/10.1007/s12369-011-0124-9\">https://doi.org/10.1007/s12369-011-0124-9</a>","bibtex":"@article{Salem_Kopp_Wachsmuth_Rohlfing_Joublin_2012, title={Generation and evaluation of communicative robot gesture}, volume={4}, DOI={<a href=\"https://doi.org/10.1007/s12369-011-0124-9\">10.1007/s12369-011-0124-9</a>}, number={2}, journal={International Journal of Social Robotics, Special Issue on Expectations, Intentions, and Actions}, publisher={Springer Science + Business Media}, author={Salem, Maha and Kopp, Stefan and Wachsmuth, Ipke and Rohlfing, Katharina and Joublin, Frank}, year={2012}, pages={201–217} }","mla":"Salem, Maha, et al. “Generation and Evaluation of Communicative Robot Gesture.” <i>International Journal of Social Robotics, Special Issue on Expectations, Intentions, and Actions</i>, vol. 4, no. 2, Springer Science + Business Media, 2012, pp. 201–17, doi:<a href=\"https://doi.org/10.1007/s12369-011-0124-9\">10.1007/s12369-011-0124-9</a>.","short":"M. Salem, S. Kopp, I. Wachsmuth, K. Rohlfing, F. 
Joublin, International Journal of Social Robotics, Special Issue on Expectations, Intentions, and Actions 4 (2012) 201–217."},"page":"201-217","intvolume":"         4","year":"2012","issue":"2","publication_identifier":{"issn":["1875-4805"]},"language":[{"iso":"eng"}],"keyword":["Social Human-Robot Interaction","Multimodal Interaction and Conversational Skills","Robot Companions and Social Robots","Non-verbal Cues and Expressiveness"],"user_id":"14931","department":[{"_id":"749"}],"_id":"17225","status":"public","abstract":[{"text":"How is communicative gesture behavior in robots perceived by humans? Although gesture is crucial in social interaction, this research question is still largely unexplored in the field of social robotics. Thus, the main objective of the present work is to investigate how gestural machine behaviors can be used to design more natural communication in social robots. The chosen approach is twofold. Firstly, the technical challenges encountered when implementing a speech-gesture generation model on a robotic platform are tackled. We present a framework that enables the humanoid robot to flexibly produce synthetic speech and co-verbal hand and arm gestures at run-time, while not being limited to a predefined repertoire of motor actions. Secondly, the achieved flexibility in robot gesture is exploited in controlled experiments. To gain a deeper understanding of how communicative robot gesture might impact and shape human perception and evaluation of human-robot interaction, we conducted a between-subjects experimental study using the humanoid robot in a joint task scenario. We manipulated the non-verbal behaviors of the robot in three experimental conditions, so that it would refer to objects by utilizing either (1) unimodal (i.e., speech only) utterances, (2) congruent multimodal (i.e., semantically matching speech and gesture) or (3) incongruent multimodal (i.e., semantically non-matching speech and gesture) utterances. 
Our findings reveal that the robot is evaluated more positively when non-verbal behaviors such as hand and arm gestures are displayed along with speech, even if they do not semantically match the spoken utterance.","lang":"eng"}],"type":"journal_article","publication":"International Journal of Social Robotics, Special Issue on Expectations, Intentions, and Actions"},{"doi":"10.1007/s12369-011-0124-9","author":[{"last_name":"Salem","full_name":"Salem, Maha","first_name":"Maha"},{"first_name":"Stefan","full_name":"Kopp, Stefan","last_name":"Kopp"},{"last_name":"Wachsmuth","full_name":"Wachsmuth, Ipke","first_name":"Ipke"},{"first_name":"Katharina","id":"50352","full_name":"Rohlfing, Katharina","last_name":"Rohlfing"},{"full_name":"Joublin, Frank","last_name":"Joublin","first_name":"Frank"}],"volume":4,"date_updated":"2023-02-01T12:52:23Z","citation":{"mla":"Salem, Maha, et al. “Generation and Evaluation of Communicative Robot Gesture.” <i>International Journal of Social Robotics, Special Issue on Expectations, Intentions, and Actions</i>, vol. 4, no. 2, Springer Science + Business Media, 2012, pp. 201–17, doi:<a href=\"https://doi.org/10.1007/s12369-011-0124-9\">10.1007/s12369-011-0124-9</a>.","bibtex":"@article{Salem_Kopp_Wachsmuth_Rohlfing_Joublin_2012, title={Generation and evaluation of communicative robot gesture}, volume={4}, DOI={<a href=\"https://doi.org/10.1007/s12369-011-0124-9\">10.1007/s12369-011-0124-9</a>}, number={2}, journal={International Journal of Social Robotics, Special Issue on Expectations, Intentions, and Actions}, publisher={Springer Science + Business Media}, author={Salem, Maha and Kopp, Stefan and Wachsmuth, Ipke and Rohlfing, Katharina and Joublin, Frank}, year={2012}, pages={201–217} }","short":"M. Salem, S. Kopp, I. Wachsmuth, K. Rohlfing, F. 
Joublin, International Journal of Social Robotics, Special Issue on Expectations, Intentions, and Actions 4 (2012) 201–217.","apa":"Salem, M., Kopp, S., Wachsmuth, I., Rohlfing, K., &#38; Joublin, F. (2012). Generation and evaluation of communicative robot gesture. <i>International Journal of Social Robotics, Special Issue on Expectations, Intentions, and Actions</i>, <i>4</i>(2), 201–217. <a href=\"https://doi.org/10.1007/s12369-011-0124-9\">https://doi.org/10.1007/s12369-011-0124-9</a>","ama":"Salem M, Kopp S, Wachsmuth I, Rohlfing K, Joublin F. Generation and evaluation of communicative robot gesture. <i>International Journal of Social Robotics, Special Issue on Expectations, Intentions, and Actions</i>. 2012;4(2):201-217. doi:<a href=\"https://doi.org/10.1007/s12369-011-0124-9\">10.1007/s12369-011-0124-9</a>","chicago":"Salem, Maha, Stefan Kopp, Ipke Wachsmuth, Katharina Rohlfing, and Frank Joublin. “Generation and Evaluation of Communicative Robot Gesture.” <i>International Journal of Social Robotics, Special Issue on Expectations, Intentions, and Actions</i> 4, no. 2 (2012): 201–17. <a href=\"https://doi.org/10.1007/s12369-011-0124-9\">https://doi.org/10.1007/s12369-011-0124-9</a>.","ieee":"M. Salem, S. Kopp, I. Wachsmuth, K. Rohlfing, and F. Joublin, “Generation and evaluation of communicative robot gesture,” <i>International Journal of Social Robotics, Special Issue on Expectations, Intentions, and Actions</i>, vol. 4, no. 2, pp. 
201–217, 2012, doi: <a href=\"https://doi.org/10.1007/s12369-011-0124-9\">10.1007/s12369-011-0124-9</a>."},"page":"201-217","intvolume":"         4","publication_identifier":{"issn":["1875-4805"]},"user_id":"14931","department":[{"_id":"749"}],"_id":"17428","status":"public","type":"journal_article","title":"Generation and evaluation of communicative robot gesture","date_created":"2020-07-28T11:44:02Z","publisher":"Springer Science + Business Media","year":"2012","issue":"2","language":[{"iso":"eng"}],"keyword":["Social Human-Robot Interaction","Multimodal Interaction and Conversational Skills","Robot Companions and Social Robots","Non-verbal Cues and Expressiveness"],"abstract":[{"text":"How is communicative gesture behavior in robots perceived by humans? Although gesture is crucial in social interaction, this research question is still largely unexplored in the field of social robotics. Thus, the main objective of the present work is to investigate how gestural machine behaviors can be used to design more natural communication in social robots. The chosen approach is twofold. Firstly, the technical challenges encountered when implementing a speech-gesture generation model on a robotic platform are tackled. We present a framework that enables the humanoid robot to flexibly produce synthetic speech and co-verbal hand and arm gestures at run-time, while not being limited to a predefined repertoire of motor actions. Secondly, the achieved flexibility in robot gesture is exploited in controlled experiments. To gain a deeper understanding of how communicative robot gesture might impact and shape human perception and evaluation of human-robot interaction, we conducted a between-subjects experimental study using the humanoid robot in a joint task scenario. 
We manipulated the non-verbal behaviors of the robot in three experimental conditions, so that it would refer to objects by utilizing either (1) unimodal (i.e., speech only) utterances, (2) congruent multimodal (i.e., semantically matching speech and gesture) or (3) incongruent multimodal (i.e., semantically non-matching speech and gesture) utterances. Our findings reveal that the robot is evaluated more positively when non-verbal behaviors such as hand and arm gestures are displayed along with speech, even if they do not semantically match the spoken utterance.","lang":"eng"}],"publication":"International Journal of Social Robotics, Special Issue on Expectations, Intentions, and Actions"},{"issue":"2","publication_identifier":{"issn":["1943-0612"]},"page":"113-128","intvolume":"         3","citation":{"chicago":"Nomikou, Iris, and Katharina Rohlfing. “Language Does Something: Body Action and Language in Maternal Input to Three-Month-Olds.” <i>IEEE Transactions on Autonomous Mental Development</i> 3, no. 2 (2011): 113–28. <a href=\"https://doi.org/10.1109/TAMD.2011.2140113\">https://doi.org/10.1109/TAMD.2011.2140113</a>.","ieee":"I. Nomikou and K. Rohlfing, “Language Does Something: Body Action and Language in Maternal Input to Three-Month-Olds,” <i>IEEE Transactions on Autonomous Mental Development</i>, vol. 3, no. 2, pp. 113–128, 2011, doi: <a href=\"https://doi.org/10.1109/TAMD.2011.2140113\">10.1109/TAMD.2011.2140113</a>.","ama":"Nomikou I, Rohlfing K. Language Does Something: Body Action and Language in Maternal Input to Three-Month-Olds. <i>IEEE Transactions on Autonomous Mental Development</i>. 2011;3(2):113-128. doi:<a href=\"https://doi.org/10.1109/TAMD.2011.2140113\">10.1109/TAMD.2011.2140113</a>","apa":"Nomikou, I., &#38; Rohlfing, K. (2011). Language Does Something: Body Action and Language in Maternal Input to Three-Month-Olds. <i>IEEE Transactions on Autonomous Mental Development</i>, <i>3</i>(2), 113–128. 
<a href=\"https://doi.org/10.1109/TAMD.2011.2140113\">https://doi.org/10.1109/TAMD.2011.2140113</a>","bibtex":"@article{Nomikou_Rohlfing_2011, title={Language Does Something: Body Action and Language in Maternal Input to Three-Month-Olds}, volume={3}, DOI={<a href=\"https://doi.org/10.1109/TAMD.2011.2140113\">10.1109/TAMD.2011.2140113</a>}, number={2}, journal={IEEE Transactions on Autonomous Mental Development}, publisher={Institute of Electrical &#38; Electronics Engineers (IEEE)}, author={Nomikou, Iris and Rohlfing, Katharina}, year={2011}, pages={113–128} }","mla":"Nomikou, Iris, and Katharina Rohlfing. “Language Does Something: Body Action and Language in Maternal Input to Three-Month-Olds.” <i>IEEE Transactions on Autonomous Mental Development</i>, vol. 3, no. 2, Institute of Electrical &#38; Electronics Engineers (IEEE), 2011, pp. 113–28, doi:<a href=\"https://doi.org/10.1109/TAMD.2011.2140113\">10.1109/TAMD.2011.2140113</a>.","short":"I. Nomikou, K. Rohlfing, IEEE Transactions on Autonomous Mental Development 3 (2011) 113–128."},"year":"2011","volume":3,"date_created":"2020-06-24T13:02:12Z","author":[{"first_name":"Iris","last_name":"Nomikou","full_name":"Nomikou, Iris"},{"full_name":"Rohlfing, Katharina","id":"50352","last_name":"Rohlfing","first_name":"Katharina"}],"date_updated":"2023-02-01T12:54:33Z","publisher":"Institute of Electrical & Electronics Engineers (IEEE)","doi":"10.1109/TAMD.2011.2140113","title":"Language Does Something: Body Action and Language in Maternal Input to Three-Month-Olds","publication":"IEEE Transactions on Autonomous Mental Development","type":"journal_article","status":"public","department":[{"_id":"749"}],"user_id":"14931","_id":"17246","language":[{"iso":"eng"}],"keyword":["acoustic packaging","mother-child interaction","social learning","multimodal grounding in input","ecology of interactions","synchrony"]},{"intvolume":"      7072","page":"31-41","citation":{"ama":"Salem M, Eyssel FA, Rohlfing K, Kopp S, Joublin F. 
Effects of gesture on the perception of psychological anthropomorphism: A case study with a humanoid robot. In: Mutlu B, Bartneck C, Ham J, Evers V, Kanda T, eds. <i>Social Robotics</i>. Vol 7072. Springer Science + Business Media; 2011:31-41. doi:<a href=\"https://doi.org/10.1007/978-3-642-25504-5_4\">10.1007/978-3-642-25504-5_4</a>","ieee":"M. Salem, F. A. Eyssel, K. Rohlfing, S. Kopp, and F. Joublin, “Effects of gesture on the perception of psychological anthropomorphism: A case study with a humanoid robot,” in <i>Social Robotics</i>, 2011, vol. 7072, pp. 31–41, doi: <a href=\"https://doi.org/10.1007/978-3-642-25504-5_4\">10.1007/978-3-642-25504-5_4</a>.","chicago":"Salem, Maha, Friederike Anne Eyssel, Katharina Rohlfing, Stefan Kopp, and F. Joublin. “Effects of Gesture on the Perception of Psychological Anthropomorphism: A Case Study with a Humanoid Robot.” In <i>Social Robotics</i>, edited by B. Mutlu, C. Bartneck, J. Ham, V. Evers, and T. Kanda, 7072:31–41. Springer Science + Business Media, 2011. <a href=\"https://doi.org/10.1007/978-3-642-25504-5_4\">https://doi.org/10.1007/978-3-642-25504-5_4</a>.","apa":"Salem, M., Eyssel, F. A., Rohlfing, K., Kopp, S., &#38; Joublin, F. (2011). Effects of gesture on the perception of psychological anthropomorphism: A case study with a humanoid robot. In B. Mutlu, C. Bartneck, J. Ham, V. Evers, &#38; T. Kanda (Eds.), <i>Social Robotics</i> (Vol. 7072, pp. 31–41). Springer Science + Business Media. <a href=\"https://doi.org/10.1007/978-3-642-25504-5_4\">https://doi.org/10.1007/978-3-642-25504-5_4</a>","mla":"Salem, Maha, et al. “Effects of Gesture on the Perception of Psychological Anthropomorphism: A Case Study with a Humanoid Robot.” <i>Social Robotics</i>, edited by B. Mutlu et al., vol. 7072, Springer Science + Business Media, 2011, pp. 
31–41, doi:<a href=\"https://doi.org/10.1007/978-3-642-25504-5_4\">10.1007/978-3-642-25504-5_4</a>.","bibtex":"@inproceedings{Salem_Eyssel_Rohlfing_Kopp_Joublin_2011, title={Effects of gesture on the perception of psychological anthropomorphism: A case study with a humanoid robot}, volume={7072}, DOI={<a href=\"https://doi.org/10.1007/978-3-642-25504-5_4\">10.1007/978-3-642-25504-5_4</a>}, booktitle={Social Robotics}, publisher={Springer Science + Business Media}, author={Salem, Maha and Eyssel, Friederike Anne and Rohlfing, Katharina and Kopp, Stefan and Joublin, F.}, editor={Mutlu, B. and Bartneck, C. and Ham, J. and Evers, V. and Kanda, T.}, year={2011}, pages={31–41} }","short":"M. Salem, F.A. Eyssel, K. Rohlfing, S. Kopp, F. Joublin, in: B. Mutlu, C. Bartneck, J. Ham, V. Evers, T. Kanda (Eds.), Social Robotics, Springer Science + Business Media, 2011, pp. 31–41."},"year":"2011","publication_identifier":{"isbn":["978-3-642-25503-8"]},"doi":"10.1007/978-3-642-25504-5_4","title":"Effects of gesture on the perception of psychological anthropomorphism: A case study with a humanoid robot","volume":7072,"date_created":"2020-07-28T11:44:04Z","author":[{"full_name":"Salem, Maha","last_name":"Salem","first_name":"Maha"},{"first_name":"Friederike Anne","full_name":"Eyssel, Friederike Anne","last_name":"Eyssel"},{"full_name":"Rohlfing, Katharina","id":"50352","last_name":"Rohlfing","first_name":"Katharina"},{"last_name":"Kopp","full_name":"Kopp, Stefan","first_name":"Stefan"},{"full_name":"Joublin, F.","last_name":"Joublin","first_name":"F."}],"publisher":"Springer Science + Business Media","date_updated":"2023-02-01T12:52:02Z","status":"public","abstract":[{"text":"Previous work has shown that gestural behaviors affect anthropomorphic inferences about artificial communicators such as virtual agents. In an experiment with a humanoid robot, we investigated to what extent gesture would affect anthropomorphic inferences about the robot. 
Particularly, we examined the effects of the robot's hand and arm gestures on the attribution of typically human traits, likability of the robot, shared reality, and future contact intentions after interacting with the robot. For this, we manipulated the non-verbal behaviors of the humanoid robot in three experimental conditions: (1) no gesture, (2) congruent gesture, and (3) incongruent gesture. We hypothesized higher ratings on all dependent measures in the two gesture (vs. no gesture) conditions. The results confirm our predictions: when the robot used gestures during interaction, it was anthropomorphized more, participants perceived it as more likable, reported greater shared reality with it, and showed increased future contact intentions than when the robot gave instructions without using gestures. Surprisingly, this effect was particularly pronounced when the robot's gestures were partly incongruent with speech. These findings show that communicative non-verbal behaviors in robotic systems affect both anthropomorphic perceptions and the mental models humans form of a humanoid robot during interaction.","lang":"eng"}],"editor":[{"first_name":"B.","full_name":"Mutlu, B.","last_name":"Mutlu"},{"last_name":"Bartneck","full_name":"Bartneck, C.","first_name":"C."},{"last_name":"Ham","full_name":"Ham, J.","first_name":"J."},{"first_name":"V.","last_name":"Evers","full_name":"Evers, V."},{"first_name":"T.","full_name":"Kanda, T.","last_name":"Kanda"}],"publication":"Social Robotics","type":"conference","language":[{"iso":"eng"}],"keyword":["Multimodal Interaction and Conversational Skills","Anthropomorphism","Non-verbal Cues and Expressiveness"],"department":[{"_id":"749"}],"user_id":"14931","_id":"17430"},{"title":"Using Prominence Detection to Generate Acoustic Feedback in Tutoring Scenarios","date_updated":"2023-02-01T12:53:54Z","date_created":"2020-06-24T13:02:10Z","author":[{"first_name":"Lars","full_name":"Schillingmann, 
Lars","last_name":"Schillingmann"},{"last_name":"Wagner","full_name":"Wagner, Petra","first_name":"Petra"},{"first_name":"Christian","full_name":"Munier, Christian","last_name":"Munier"},{"full_name":"Wrede, Britta","last_name":"Wrede","first_name":"Britta"},{"first_name":"Katharina","full_name":"Rohlfing, Katharina","id":"50352","last_name":"Rohlfing"}],"year":"2011","page":"3105-3108","citation":{"ama":"Schillingmann L, Wagner P, Munier C, Wrede B, Rohlfing K. Using Prominence Detection to Generate Acoustic Feedback in Tutoring Scenarios. In: <i>Interspeech 2011 (12th Annual Conference of the International Speech Communication Association)</i>. ; 2011:3105-3108.","chicago":"Schillingmann, Lars, Petra Wagner, Christian Munier, Britta Wrede, and Katharina Rohlfing. “Using Prominence Detection to Generate Acoustic Feedback in Tutoring Scenarios.” In <i>Interspeech 2011 (12th Annual Conference of the International Speech Communication Association)</i>, 3105–8, 2011.","ieee":"L. Schillingmann, P. Wagner, C. Munier, B. Wrede, and K. Rohlfing, “Using Prominence Detection to Generate Acoustic Feedback in Tutoring Scenarios,” in <i>Interspeech 2011 (12th Annual Conference of the International Speech Communication Association)</i>, 2011, pp. 3105–3108.","mla":"Schillingmann, Lars, et al. “Using Prominence Detection to Generate Acoustic Feedback in Tutoring Scenarios.” <i>Interspeech 2011 (12th Annual Conference of the International Speech Communication Association)</i>, 2011, pp. 3105–08.","bibtex":"@inproceedings{Schillingmann_Wagner_Munier_Wrede_Rohlfing_2011, title={Using Prominence Detection to Generate Acoustic Feedback in Tutoring Scenarios}, booktitle={Interspeech 2011 (12th Annual Conference of the International Speech Communication Association)}, author={Schillingmann, Lars and Wagner, Petra and Munier, Christian and Wrede, Britta and Rohlfing, Katharina}, year={2011}, pages={3105–3108} }","short":"L. Schillingmann, P. Wagner, C. Munier, B. Wrede, K. 
Rohlfing, in: Interspeech 2011 (12th Annual Conference of the International Speech Communication Association), 2011, pp. 3105–3108.","apa":"Schillingmann, L., Wagner, P., Munier, C., Wrede, B., &#38; Rohlfing, K. (2011). Using Prominence Detection to Generate Acoustic Feedback in Tutoring Scenarios. <i>Interspeech 2011 (12th Annual Conference of the International Speech Communication Association)</i>, 3105–3108."},"keyword":["Feedback","Human Robot Interaction","Prominence","Multimodal Action Segmentation"],"language":[{"iso":"eng"}],"_id":"17244","department":[{"_id":"749"}],"user_id":"14931","abstract":[{"lang":"eng","text":"Robots interacting with humans need to understand actions and make use of language in social interactions. Research on infant development has shown that language helps the learner to structure visual observations of action. This acoustic information typically in the form of narration overlaps with action sequences and provides infants with a bottom-up guide to find structure within them. This concept has been introduced as acoustic packaging by Hirsh-Pasek and Golinkoff. We developed and integrated a prominence detection module in our acoustic packaging system to detect semantically relevant information linguistically highlighted by the tutor. Evaluation results on speech data from adult-infant interactions show a significant agreement with human raters. Furthermore a first approach based on acoustic packages which uses the prominence detection results to generate acoustic feedback is presented. 
Index Terms: prominence, multimodal action segmentation, human robot interaction, feedback"}],"status":"public","publication":"Interspeech 2011 (12th Annual Conference of the International Speech Communication Association)","type":"conference"},{"keyword":["Prominence","Multimodal Action Segmentation","Feedback","Color Saliency","Human Robot Interaction"],"language":[{"iso":"eng"}],"_id":"17245","department":[{"_id":"749"}],"user_id":"14931","status":"public","type":"conference","title":"Acoustic Packaging and the Learning of Words","doi":"10.3389/conf.fncom.2011.52.00020","date_updated":"2023-02-01T12:54:16Z","date_created":"2020-06-24T13:02:11Z","author":[{"last_name":"Schillingmann","full_name":"Schillingmann, Lars","first_name":"Lars"},{"first_name":"Petra","last_name":"Wagner","full_name":"Wagner, Petra"},{"full_name":"Munier, Christian","last_name":"Munier","first_name":"Christian"},{"first_name":"Britta","full_name":"Wrede, Britta","last_name":"Wrede"},{"first_name":"Katharina","id":"50352","full_name":"Rohlfing, Katharina","last_name":"Rohlfing"}],"year":"2011","citation":{"ama":"Schillingmann L, Wagner P, Munier C, Wrede B, Rohlfing K. Acoustic Packaging and the Learning of Words. In: ; 2011. doi:<a href=\"https://doi.org/10.3389/conf.fncom.2011.52.00020\">10.3389/conf.fncom.2011.52.00020</a>","chicago":"Schillingmann, Lars, Petra Wagner, Christian Munier, Britta Wrede, and Katharina Rohlfing. “Acoustic Packaging and the Learning of Words,” 2011. <a href=\"https://doi.org/10.3389/conf.fncom.2011.52.00020\">https://doi.org/10.3389/conf.fncom.2011.52.00020</a>.","ieee":"L. Schillingmann, P. Wagner, C. Munier, B. Wrede, and K. Rohlfing, “Acoustic Packaging and the Learning of Words,” 2011, doi: <a href=\"https://doi.org/10.3389/conf.fncom.2011.52.00020\">10.3389/conf.fncom.2011.52.00020</a>.","apa":"Schillingmann, L., Wagner, P., Munier, C., Wrede, B., &#38; Rohlfing, K. (2011). <i>Acoustic Packaging and the Learning of Words</i>. 
<a href=\"https://doi.org/10.3389/conf.fncom.2011.52.00020\">https://doi.org/10.3389/conf.fncom.2011.52.00020</a>","bibtex":"@inproceedings{Schillingmann_Wagner_Munier_Wrede_Rohlfing_2011, title={Acoustic Packaging and the Learning of Words}, DOI={<a href=\"https://doi.org/10.3389/conf.fncom.2011.52.00020\">10.3389/conf.fncom.2011.52.00020</a>}, author={Schillingmann, Lars and Wagner, Petra and Munier, Christian and Wrede, Britta and Rohlfing, Katharina}, year={2011} }","mla":"Schillingmann, Lars, et al. <i>Acoustic Packaging and the Learning of Words</i>. 2011, doi:<a href=\"https://doi.org/10.3389/conf.fncom.2011.52.00020\">10.3389/conf.fncom.2011.52.00020</a>.","short":"L. Schillingmann, P. Wagner, C. Munier, B. Wrede, K. Rohlfing, in: 2011."},"publication_identifier":{"issn":["1662-5188"]}},{"year":"2011","citation":{"apa":"Salem, M., Eyssel, F. A., Rohlfing, K., Kopp, S., &#38; Joublin, F. (2011). Effects of gesture on the perception of psychological anthropomorphism: A case study with a humanoid robot. In B. Mutlu, C. Bartneck, J. Ham, V. Evers, &#38; T. Kanda (Eds.), <i>Social Robotics</i> (Vol. 7072, pp. 31–41). Springer Science + Business Media. <a href=\"https://doi.org/10.1007/978-3-642-25504-5_4\">https://doi.org/10.1007/978-3-642-25504-5_4</a>","bibtex":"@inproceedings{Salem_Eyssel_Rohlfing_Kopp_Joublin_2011, title={Effects of gesture on the perception of psychological anthropomorphism: A case study with a humanoid robot}, volume={7072}, DOI={<a href=\"https://doi.org/10.1007/978-3-642-25504-5_4\">10.1007/978-3-642-25504-5_4</a>}, booktitle={Social Robotics}, publisher={Springer Science + Business Media}, author={Salem, Maha and Eyssel, Friederike Anne and Rohlfing, Katharina and Kopp, Stefan and Joublin, F.}, editor={Mutlu, B. and Bartneck, C. and Ham, J. and Evers, V. and Kanda, T.}, year={2011}, pages={31–41} }","short":"M. Salem, F.A. Eyssel, K. Rohlfing, S. Kopp, F. Joublin, in: B. Mutlu, C. Bartneck, J. Ham, V. Evers, T. 
Kanda (Eds.), Social Robotics, Springer Science + Business Media, 2011, pp. 31–41.","mla":"Salem, Maha, et al. “Effects of Gesture on the Perception of Psychological Anthropomorphism: A Case Study with a Humanoid Robot.” <i>Social Robotics</i>, edited by B. Mutlu et al., vol. 7072, Springer Science + Business Media, 2011, pp. 31–41, doi:<a href=\"https://doi.org/10.1007/978-3-642-25504-5_4\">10.1007/978-3-642-25504-5_4</a>.","chicago":"Salem, Maha, Friederike Anne Eyssel, Katharina Rohlfing, Stefan Kopp, and F. Joublin. “Effects of Gesture on the Perception of Psychological Anthropomorphism: A Case Study with a Humanoid Robot.” In <i>Social Robotics</i>, edited by B. Mutlu, C. Bartneck, J. Ham, V. Evers, and T. Kanda, 7072:31–41. Springer Science + Business Media, 2011. <a href=\"https://doi.org/10.1007/978-3-642-25504-5_4\">https://doi.org/10.1007/978-3-642-25504-5_4</a>.","ieee":"M. Salem, F. A. Eyssel, K. Rohlfing, S. Kopp, and F. Joublin, “Effects of gesture on the perception of psychological anthropomorphism: A case study with a humanoid robot,” in <i>Social Robotics</i>, 2011, vol. 7072, pp. 31–41, doi: <a href=\"https://doi.org/10.1007/978-3-642-25504-5_4\">10.1007/978-3-642-25504-5_4</a>.","ama":"Salem M, Eyssel FA, Rohlfing K, Kopp S, Joublin F. Effects of gesture on the perception of psychological anthropomorphism: A case study with a humanoid robot. In: Mutlu B, Bartneck C, Ham J, Evers V, Kanda T, eds. <i>Social Robotics</i>. Vol 7072. Springer Science + Business Media; 2011:31-41. 
doi:<a href=\"https://doi.org/10.1007/978-3-642-25504-5_4\">10.1007/978-3-642-25504-5_4</a>"},"page":"31-41","intvolume":"      7072","publication_identifier":{"isbn":["978-3-642-25503-8"]},"title":"Effects of gesture on the perception of psychological anthropomorphism: A case study with a humanoid robot","doi":"10.1007/978-3-642-25504-5_4","date_updated":"2023-02-01T12:58:57Z","publisher":"Springer Science + Business Media","date_created":"2020-06-24T13:02:07Z","author":[{"full_name":"Salem, Maha","last_name":"Salem","first_name":"Maha"},{"first_name":"Friederike Anne","full_name":"Eyssel, Friederike Anne","last_name":"Eyssel"},{"full_name":"Rohlfing, Katharina","id":"50352","last_name":"Rohlfing","first_name":"Katharina"},{"last_name":"Kopp","full_name":"Kopp, Stefan","first_name":"Stefan"},{"first_name":"F.","last_name":"Joublin","full_name":"Joublin, F."}],"volume":7072,"abstract":[{"text":"Previous work has shown that gestural behaviors affect anthropomorphic inferences about artificial communicators such as virtual agents. In an experiment with a humanoid robot, we investigated to what extent gesture would affect anthropomorphic inferences about the robot. Particularly, we examined the effects of the robot's hand and arm gestures on the attribution of typically human traits, likability of the robot, shared reality, and future contact intentions after interacting with the robot. For this, we manipulated the non-verbal behaviors of the humanoid robot in three experimental conditions: (1) no gesture, (2) congruent gesture, and (3) incongruent gesture. We hypothesized higher ratings on all dependent measures in the two gesture (vs. no gesture) conditions. The results confirm our predictions: when the robot used gestures during interaction, it was anthropomorphized more, participants perceived it as more likable, reported greater shared reality with it, and showed increased future contact intentions than when the robot gave instructions without using gestures. 
Surprisingly, this effect was particularly pronounced when the robot's gestures were partly incongruent with speech. These findings show that communicative non-verbal behaviors in robotic systems affect both anthropomorphic perceptions and the mental models humans form of a humanoid robot during interaction.","lang":"eng"}],"editor":[{"full_name":"Mutlu, B.","last_name":"Mutlu","first_name":"B."},{"last_name":"Bartneck","full_name":"Bartneck, C.","first_name":"C."},{"last_name":"Ham","full_name":"Ham, J.","first_name":"J."},{"last_name":"Evers","full_name":"Evers, V.","first_name":"V."},{"last_name":"Kanda","full_name":"Kanda, T.","first_name":"T."}],"status":"public","type":"conference","publication":"Social Robotics","keyword":["Multimodal Interaction and Conversational Skills","Anthropomorphism","Non-verbal Cues and Expressiveness"],"language":[{"iso":"eng"}],"_id":"17242","user_id":"14931","department":[{"_id":"749"}]},{"type":"journal_article","publication":"IEEE Journal of Selected Topics in Signal Processing","status":"public","abstract":[{"lang":"eng","text":"For an environment to be perceived as being smart, contextual information has to be gathered to adapt the system's behavior and its interface towards the user. Being a rich source of context information speech can be acquired unobtrusively by microphone arrays and then processed to extract information about the user and his environment. In this paper, a system for joint temporal segmentation, speaker localization, and identification is presented, which is supported by face identification from video data obtained from a steerable camera. Special attention is paid to latency aspects and online processing capabilities, as they are important for the application under investigation, namely ambient communication. It describes the vision of terminal-less, session-less and multi-modal telecommunication with remote partners, where the user can move freely within his home while the communication follows him. 
The speaker diarization serves as a context source, which has been integrated in a service-oriented middleware architecture and provided to the application to select the most appropriate I/O device and to steer the camera towards the speaker during ambient communication."}],"user_id":"460","department":[{"_id":"54"}],"_id":"11892","language":[{"iso":"eng"}],"keyword":["audio streaming","audio visual data streaming","context information speech","face identification","face recognition","image segmentation","middleware","multimodal telecommunication","online diarization","service oriented middleware architecture","sessionless telecommunication","software architecture","speaker identification","speaker localization","speaker recognition","steerable camera","telecommunication computing","temporal segmentation","terminal-less telecommunication","video streaming"],"issue":"5","quality_controlled":"1","citation":{"short":"J. Schmalenstroeer, R. Haeb-Umbach, IEEE Journal of Selected Topics in Signal Processing 4 (2010) 845–856.","bibtex":"@article{Schmalenstroeer_Haeb-Umbach_2010, title={Online Diarization of Streaming Audio-Visual Data for Smart Environments}, volume={4}, DOI={<a href=\"https://doi.org/10.1109/JSTSP.2010.2050519\">10.1109/JSTSP.2010.2050519</a>}, number={5}, journal={IEEE Journal of Selected Topics in Signal Processing}, author={Schmalenstroeer, Joerg and Haeb-Umbach, Reinhold}, year={2010}, pages={845–856} }","mla":"Schmalenstroeer, Joerg, and Reinhold Haeb-Umbach. “Online Diarization of Streaming Audio-Visual Data for Smart Environments.” <i>IEEE Journal of Selected Topics in Signal Processing</i>, vol. 4, no. 5, 2010, pp. 845–56, doi:<a href=\"https://doi.org/10.1109/JSTSP.2010.2050519\">10.1109/JSTSP.2010.2050519</a>.","apa":"Schmalenstroeer, J., &#38; Haeb-Umbach, R. (2010). Online Diarization of Streaming Audio-Visual Data for Smart Environments. <i>IEEE Journal of Selected Topics in Signal Processing</i>, <i>4</i>(5), 845–856. 
<a href=\"https://doi.org/10.1109/JSTSP.2010.2050519\">https://doi.org/10.1109/JSTSP.2010.2050519</a>","ama":"Schmalenstroeer J, Haeb-Umbach R. Online Diarization of Streaming Audio-Visual Data for Smart Environments. <i>IEEE Journal of Selected Topics in Signal Processing</i>. 2010;4(5):845-856. doi:<a href=\"https://doi.org/10.1109/JSTSP.2010.2050519\">10.1109/JSTSP.2010.2050519</a>","ieee":"J. Schmalenstroeer and R. Haeb-Umbach, “Online Diarization of Streaming Audio-Visual Data for Smart Environments,” <i>IEEE Journal of Selected Topics in Signal Processing</i>, vol. 4, no. 5, pp. 845–856, 2010, doi: <a href=\"https://doi.org/10.1109/JSTSP.2010.2050519\">10.1109/JSTSP.2010.2050519</a>.","chicago":"Schmalenstroeer, Joerg, and Reinhold Haeb-Umbach. “Online Diarization of Streaming Audio-Visual Data for Smart Environments.” <i>IEEE Journal of Selected Topics in Signal Processing</i> 4, no. 5 (2010): 845–56. <a href=\"https://doi.org/10.1109/JSTSP.2010.2050519\">https://doi.org/10.1109/JSTSP.2010.2050519</a>."},"intvolume":"         4","page":"845-856","year":"2010","date_created":"2019-07-12T05:30:16Z","author":[{"first_name":"Joerg","last_name":"Schmalenstroeer","id":"460","full_name":"Schmalenstroeer, Joerg"},{"last_name":"Haeb-Umbach","id":"242","full_name":"Haeb-Umbach, Reinhold","first_name":"Reinhold"}],"volume":4,"oa":"1","date_updated":"2023-10-26T08:10:18Z","main_file_link":[{"url":"https://groups.uni-paderborn.de/nt/pubs/2010/ScHa10.pdf","open_access":"1"}],"doi":"10.1109/JSTSP.2010.2050519","title":"Online Diarization of Streaming Audio-Visual Data for Smart Environments"},{"keyword":["task- oriented dialogue","Late Talker","maternal multimodal input","gestural motherese"],"language":[{"iso":"eng"}],"_id":"17256","user_id":"14931","department":[{"_id":"749"}],"status":"public","type":"journal_article","publication":"Gesture","title":"Children's lexical skills and task demands affect gestural behavior in mothers of late-talking children and children with 
typical language development","doi":"10.1075/gest.10.2-3.07gri","publisher":"John Benjamins Publishing Company","date_updated":"2023-02-01T13:01:23Z","author":[{"first_name":"Angela","last_name":"Grimminger","full_name":"Grimminger, Angela","id":"57578"},{"last_name":"Rohlfing","id":"50352","full_name":"Rohlfing, Katharina","first_name":"Katharina"},{"first_name":"Prisca","last_name":"Stenneken","full_name":"Stenneken, Prisca"}],"date_created":"2020-06-24T13:02:24Z","volume":10,"year":"2010","citation":{"ieee":"A. Grimminger, K. Rohlfing, and P. Stenneken, “Children’s lexical skills and task demands affect gestural behavior in mothers of late-talking children and children with typical language development,” <i>Gesture</i>, vol. 10, no. 2, pp. 251–278, 2010, doi: <a href=\"https://doi.org/10.1075/gest.10.2-3.07gri\">10.1075/gest.10.2-3.07gri</a>.","chicago":"Grimminger, Angela, Katharina Rohlfing, and Prisca Stenneken. “Children’s Lexical Skills and Task Demands Affect Gestural Behavior in Mothers of Late-Talking Children and Children with Typical Language Development.” <i>Gesture</i> 10, no. 2 (2010): 251–78. <a href=\"https://doi.org/10.1075/gest.10.2-3.07gri\">https://doi.org/10.1075/gest.10.2-3.07gri</a>.","ama":"Grimminger A, Rohlfing K, Stenneken P. Children’s lexical skills and task demands affect gestural behavior in mothers of late-talking children and children with typical language development. <i>Gesture</i>. 2010;10(2):251-278. doi:<a href=\"https://doi.org/10.1075/gest.10.2-3.07gri\">10.1075/gest.10.2-3.07gri</a>","apa":"Grimminger, A., Rohlfing, K., &#38; Stenneken, P. (2010). Children’s lexical skills and task demands affect gestural behavior in mothers of late-talking children and children with typical language development. <i>Gesture</i>, <i>10</i>(2), 251–278. <a href=\"https://doi.org/10.1075/gest.10.2-3.07gri\">https://doi.org/10.1075/gest.10.2-3.07gri</a>","short":"A. Grimminger, K. Rohlfing, P. 
Stenneken, Gesture 10 (2010) 251–278.","mla":"Grimminger, Angela, et al. “Children’s Lexical Skills and Task Demands Affect Gestural Behavior in Mothers of Late-Talking Children and Children with Typical Language Development.” <i>Gesture</i>, vol. 10, no. 2, John Benjamins Publishing Company, 2010, pp. 251–78, doi:<a href=\"https://doi.org/10.1075/gest.10.2-3.07gri\">10.1075/gest.10.2-3.07gri</a>.","bibtex":"@article{Grimminger_Rohlfing_Stenneken_2010, title={Children’s lexical skills and task demands affect gestural behavior in mothers of late-talking children and children with typical language development}, volume={10}, DOI={<a href=\"https://doi.org/10.1075/gest.10.2-3.07gri\">10.1075/gest.10.2-3.07gri</a>}, number={2}, journal={Gesture}, publisher={John Benjamins Publishing Company}, author={Grimminger, Angela and Rohlfing, Katharina and Stenneken, Prisca}, year={2010}, pages={251–278} }"},"intvolume":"        10","page":"251-278","publication_identifier":{"issn":["1569-9773"]},"issue":"2"},{"keyword":["gaze","gesture","Multimodal","adult-child interaction"],"language":[{"iso":"eng"}],"_id":"17259","user_id":"14931","department":[{"_id":"749"}],"abstract":[{"lang":"eng","text":"Learning is a social endeavor, in which the learner generally receives support from his/her social partner(s). In developmental research – even though tutors/adults behavior modifications in their speech, gestures and motions have been extensively studied, studies barely consider the recipient’s (i.e. the child’s) perspective in the analysis of the adult’s presentation, In addition, the variability in parental behavior, i.e. the fact that not every parent modifies her/his behavior in the same way, found less fine-grained analysis. In contrast, in this paper, we assume an interactional perspective investigating the loop between the tutor’s and the learner’s actions. 
With this approach, we aim both at discovering the levels and features of variability and at achieving a better understanding of how they come about within the course of the interaction. For our analysis, we used a combination of (1) qualitative investigation derived from ethnomethodological Conversation Analysis (CA), (2) semi-automatic computational 2D hand tracking and (3) a mathematically based visualization of the data. Our analysis reveals that tutors not only shape their demonstrations differently with regard to the intended recipient per se (adult-directed vs. child-directed), but most importantly that the learner’s feedback during the presentation is consequential for the concrete ways in which the presentation is carried out."}],
"status":"public","type":"conference","publication":"Gesture and Speech in Interaction","title":"On the loop of action modification and the recipient's gaze in adult-child interaction","date_updated":"2023-02-01T13:02:31Z",
"author":[{"last_name":"Pitsch","full_name":"Pitsch, Karola","first_name":"Karola"},{"first_name":"Anna-Lisa","last_name":"Vollmer","full_name":"Vollmer, Anna-Lisa"},{"last_name":"Fritsch","full_name":"Fritsch, Jannik","first_name":"Jannik"},{"last_name":"Wrede","full_name":"Wrede, Britta","first_name":"Britta"},{"last_name":"Rohlfing","full_name":"Rohlfing, Katharina","id":"50352","first_name":"Katharina"},{"last_name":"Sagerer","full_name":"Sagerer, Gerhard","first_name":"Gerhard"}],
"date_created":"2020-06-24T13:02:27Z","year":"2009",
"citation":{"short":"K. Pitsch, A.-L. Vollmer, J. Fritsch, B. Wrede, K. Rohlfing, G. Sagerer, in: Gesture and Speech in Interaction, 2009.",
"bibtex":"@inproceedings{Pitsch_Vollmer_Fritsch_Wrede_Rohlfing_Sagerer_2009, title={On the loop of action modification and the recipient’s gaze in adult-child interaction}, booktitle={Gesture and Speech in Interaction}, author={Pitsch, Karola and Vollmer, Anna-Lisa and Fritsch, Jannik and Wrede, Britta and Rohlfing, Katharina and Sagerer, Gerhard}, year={2009} }",
"mla":"Pitsch, Karola, et al. “On the Loop of Action Modification and the Recipient’s Gaze in Adult-Child Interaction.” <i>Gesture and Speech in Interaction</i>, 2009.",
"apa":"Pitsch, K., Vollmer, A.-L., Fritsch, J., Wrede, B., Rohlfing, K., &#38; Sagerer, G. (2009). On the loop of action modification and the recipient’s gaze in adult-child interaction. <i>Gesture and Speech in Interaction</i>.",
"ieee":"K. Pitsch, A.-L. Vollmer, J. Fritsch, B. Wrede, K. Rohlfing, and G. Sagerer, “On the loop of action modification and the recipient’s gaze in adult-child interaction,” 2009.",
"chicago":"Pitsch, Karola, Anna-Lisa Vollmer, Jannik Fritsch, Britta Wrede, Katharina Rohlfing, and Gerhard Sagerer. “On the Loop of Action Modification and the Recipient’s Gaze in Adult-Child Interaction.” In <i>Gesture and Speech in Interaction</i>, 2009.",
"ama":"Pitsch K, Vollmer A-L, Fritsch J, Wrede B, Rohlfing K, Sagerer G. On the loop of action modification and the recipient’s gaze in adult-child interaction. In: <i>Gesture and Speech in Interaction</i>. ; 2009."}},
{"abstract":[{"lang":"eng","text":"In developmental research, tutoring behavior has been identified as scaffolding infants' learning processes. It has been defined in terms of child-directed speech (Motherese), child-directed motion (Motionese), and contingency. In the field of developmental robotics, research often assumes that in human-robot interaction (HRI), robots are treated similar to infants, because their immature cognitive capabilities benefit from this behavior. However, according to our knowledge, it has barely been studied whether this is true and how exactly humans alter their behavior towards a robotic interaction partner. In this paper, we present results concerning the acceptance of a robotic agent in a social learning scenario obtained via comparison to adults and 8-11 months old infants in equal conditions. These results constitute an important empirical basis for making use of tutoring behavior in social robotics. In our study, we performed a detailed multimodal analysis of HRI in a tutoring situation using the example of a robot simulation equipped with a bottom-up saliency-based attention model. Our results reveal significant differences in hand movement velocity, motion pauses, range of motion, and eye gaze suggesting that for example adults decrease their hand movement velocity in an Adult-Child Interaction (ACI), opposed to an Adult-Adult Interaction (AAI) and this decrease is even higher in the Adult-Robot Interaction (ARI). We also found important differences between ACI and ARI in how the behavior is modified over time as the interaction unfolds. These findings indicate the necessity of integrating top-down feedback structures into a bottom-up system for robots to be fully accepted as interaction partners."}],
"status":"public","publication":"Development and Learning, 2009. ICDL 2009. IEEE 8th International Conference on Development and Learning","type":"conference",
"keyword":["robot simulation","hand movement velocity","robotic interaction partner","robotic agent","robot-directed interaction","multimodal analysis","Motionese","Motherese","intelligent tutoring systems","immature cognitive capability","human computer interaction","eye gaze","child-directed speech","child-directed motion","bottom-up system","bottom-up saliency-based attention model","adult-robot interaction","adult-child interaction","adult-adult interaction","human-robot interaction","action learning","social learning scenario","social robotics","software agents","top-down feedback structures","tutoring behavior"],
"language":[{"iso":"eng"}],"_id":"17272","department":[{"_id":"749"}],"user_id":"14931","year":"2009","page":"1-6",
"citation":{"short":"A.-L. Vollmer, K.S. Lohan, K. Fischer, Y. Nagai, K. Pitsch, J. Fritsch, K. Rohlfing, B. Wrede, in: Development and Learning, 2009. ICDL 2009. IEEE 8th International Conference on Development and Learning, IEEE, 2009, pp. 1–6.",
"bibtex":"@inproceedings{Vollmer_Lohan_Fischer_Nagai_Pitsch_Fritsch_Rohlfing_Wrede_2009, title={People modify their tutoring behavior in robot-directed interaction for action learning}, DOI={<a href=\"https://doi.org/10.1109/DEVLRN.2009.5175516\">10.1109/DEVLRN.2009.5175516</a>}, booktitle={Development and Learning, 2009. ICDL 2009. IEEE 8th International Conference on Development and Learning}, publisher={IEEE}, author={Vollmer, Anna-Lisa and Lohan, Katrin Solveig and Fischer, Kerstin and Nagai, Yukie and Pitsch, Karola and Fritsch, Jannik and Rohlfing, Katharina and Wrede, Britta}, year={2009}, pages={1–6} }",
"mla":"Vollmer, Anna-Lisa, et al. “People Modify Their Tutoring Behavior in Robot-Directed Interaction for Action Learning.” <i>Development and Learning, 2009. ICDL 2009. IEEE 8th International Conference on Development and Learning</i>, IEEE, 2009, pp. 1–6, doi:<a href=\"https://doi.org/10.1109/DEVLRN.2009.5175516\">10.1109/DEVLRN.2009.5175516</a>.",
"ama":"Vollmer A-L, Lohan KS, Fischer K, et al. People modify their tutoring behavior in robot-directed interaction for action learning. In: <i>Development and Learning, 2009. ICDL 2009. IEEE 8th International Conference on Development and Learning</i>. IEEE; 2009:1-6. doi:<a href=\"https://doi.org/10.1109/DEVLRN.2009.5175516\">10.1109/DEVLRN.2009.5175516</a>",
"apa":"Vollmer, A.-L., Lohan, K. S., Fischer, K., Nagai, Y., Pitsch, K., Fritsch, J., Rohlfing, K., &#38; Wrede, B. (2009). People modify their tutoring behavior in robot-directed interaction for action learning. <i>Development and Learning, 2009. ICDL 2009. IEEE 8th International Conference on Development and Learning</i>, 1–6. <a href=\"https://doi.org/10.1109/DEVLRN.2009.5175516\">https://doi.org/10.1109/DEVLRN.2009.5175516</a>",
"ieee":"A.-L. Vollmer <i>et al.</i>, “People modify their tutoring behavior in robot-directed interaction for action learning,” in <i>Development and Learning, 2009. ICDL 2009. IEEE 8th International Conference on Development and Learning</i>, 2009, pp. 1–6, doi: <a href=\"https://doi.org/10.1109/DEVLRN.2009.5175516\">10.1109/DEVLRN.2009.5175516</a>.",
"chicago":"Vollmer, Anna-Lisa, Katrin Solveig Lohan, Kerstin Fischer, Yukie Nagai, Karola Pitsch, Jannik Fritsch, Katharina Rohlfing, and Britta Wrede. “People Modify Their Tutoring Behavior in Robot-Directed Interaction for Action Learning.” In <i>Development and Learning, 2009. ICDL 2009. IEEE 8th International Conference on Development and Learning</i>, 1–6. IEEE, 2009. <a href=\"https://doi.org/10.1109/DEVLRN.2009.5175516\">https://doi.org/10.1109/DEVLRN.2009.5175516</a>."},
"title":"People modify their tutoring behavior in robot-directed interaction for action learning","doi":"10.1109/DEVLRN.2009.5175516","date_updated":"2023-02-01T13:06:43Z","publisher":"IEEE","date_created":"2020-06-24T13:02:43Z",
"author":[{"first_name":"Anna-Lisa","last_name":"Vollmer","full_name":"Vollmer, Anna-Lisa"},{"full_name":"Lohan, Katrin Solveig","last_name":"Lohan","first_name":"Katrin Solveig"},{"full_name":"Fischer, Kerstin","last_name":"Fischer","first_name":"Kerstin"},{"last_name":"Nagai","full_name":"Nagai, Yukie","first_name":"Yukie"},{"first_name":"Karola","last_name":"Pitsch","full_name":"Pitsch, Karola"},{"first_name":"Jannik","last_name":"Fritsch","full_name":"Fritsch, Jannik"},{"id":"50352","full_name":"Rohlfing, Katharina","last_name":"Rohlfing","first_name":"Katharina"},{"first_name":"Britta","full_name":"Wrede, Britta","last_name":"Wrede"}]},
{"year":"2009",
"citation":{"ama":"Schillingmann L, Wrede B, Rohlfing K. Towards a Computational Model of Acoustic Packaging. In: <i>International Conference on Development and Learning (ICDL 2009)</i>. IEEE; 2009. doi:<a href=\"https://doi.org/10.1109/devlrn.2009.5175523\">10.1109/devlrn.2009.5175523</a>",
"ieee":"L. Schillingmann, B. Wrede, and K. Rohlfing, “Towards a Computational Model of Acoustic Packaging,” 2009, doi: <a href=\"https://doi.org/10.1109/devlrn.2009.5175523\">10.1109/devlrn.2009.5175523</a>.",
"chicago":"Schillingmann, Lars, Britta Wrede, and Katharina Rohlfing. “Towards a Computational Model of Acoustic Packaging.” In <i>International Conference on Development and Learning (ICDL 2009)</i>. IEEE, 2009. <a href=\"https://doi.org/10.1109/devlrn.2009.5175523\">https://doi.org/10.1109/devlrn.2009.5175523</a>.",
"apa":"Schillingmann, L., Wrede, B., &#38; Rohlfing, K. (2009). Towards a Computational Model of Acoustic Packaging. <i>International Conference on Development and Learning (ICDL 2009)</i>. <a href=\"https://doi.org/10.1109/devlrn.2009.5175523\">https://doi.org/10.1109/devlrn.2009.5175523</a>",
"short":"L. Schillingmann, B. Wrede, K. Rohlfing, in: International Conference on Development and Learning (ICDL 2009), IEEE, 2009.",
"bibtex":"@inproceedings{Schillingmann_Wrede_Rohlfing_2009, title={Towards a Computational Model of Acoustic Packaging}, DOI={<a href=\"https://doi.org/10.1109/devlrn.2009.5175523\">10.1109/devlrn.2009.5175523</a>}, booktitle={International Conference on Development and Learning (ICDL 2009)}, publisher={IEEE}, author={Schillingmann, Lars and Wrede, Britta and Rohlfing, Katharina}, year={2009} }",
"mla":"Schillingmann, Lars, et al. “Towards a Computational Model of Acoustic Packaging.” <i>International Conference on Development and Learning (ICDL 2009)</i>, IEEE, 2009, doi:<a href=\"https://doi.org/10.1109/devlrn.2009.5175523\">10.1109/devlrn.2009.5175523</a>."},
"publication_identifier":{"isbn":["978-1-4244-4117-4"]},"title":"Towards a Computational Model of Acoustic Packaging","doi":"10.1109/devlrn.2009.5175523","date_updated":"2023-02-01T13:05:32Z","publisher":"IEEE","date_created":"2020-06-24T13:02:38Z",
"author":[{"full_name":"Schillingmann, Lars","last_name":"Schillingmann","first_name":"Lars"},{"full_name":"Wrede, Britta","last_name":"Wrede","first_name":"Britta"},{"full_name":"Rohlfing, Katharina","id":"50352","last_name":"Rohlfing","first_name":"Katharina"}],
"status":"public","type":"conference","publication":"International Conference on Development and Learning (ICDL 2009)","keyword":["Acoustic Packaging","multimodal"],"language":[{"iso":"eng"}],"_id":"17268","user_id":"14931","department":[{"_id":"749"}]},
{"_id":"38543","department":[{"_id":"672"}],"user_id":"5786",
"keyword":["User Interface","Interaction Manager","Output Device","Multimodal Interface","Interaction Object"],
"language":[{"iso":"eng"}],"publication":"Proceedings of the 5th International Workshop on Task Models and Diagrams for User Interface Design (TAMODIA'2006)","type":"conference",
"abstract":[{"text":"Today a large variety of mobile interaction devices such as PDAs and mobile phones enforce the development of a wide range of user interfaces for each platform. The complexity even grows, when multiple interaction devices are used to perform the same task and when different modalities have to be supported. We introduce a new dialog model for the abstraction of concrete user interfaces with a separate advanced control layer for the integration of different modalities. In this context, we present the Dialog and Interface Specification Language (DISL), which comes with a proof-of-concept implementation.","lang":"eng"}],
"status":"public","date_updated":"2023-01-24T08:03:56Z","date_created":"2023-01-24T08:03:51Z",
"author":[{"first_name":"Robbie","full_name":"Schäfer, Robbie","last_name":"Schäfer"},{"first_name":"Steffen","full_name":"Bleul, Steffen","last_name":"Bleul"},{"first_name":"Wolfgang","last_name":"Müller","id":"16243","full_name":"Müller, Wolfgang"}],
"title":"Dialog Modelling for Multiple Devices and Multiple Interaction Modalities","doi":"10.1007/978-3-540-70816-2_4","publication_identifier":{"isbn":["978-3-540-70815-5"]},"year":"2006","place":"Hasselt, Belgium",
"citation":{"ieee":"R. Schäfer, S. Bleul, and W. Müller, “Dialog Modelling for Multiple Devices and Multiple Interaction Modalities,” 2006, doi: <a href=\"https://doi.org/10.1007/978-3-540-70816-2_4\">10.1007/978-3-540-70816-2_4</a>.",
"chicago":"Schäfer, Robbie, Steffen Bleul, and Wolfgang Müller. “Dialog Modelling for Multiple Devices and Multiple Interaction Modalities.” In <i>Proceedings of the 5th International Workshop on Task Models and Diagrams for User Interface Design (TAMODIA’2006)</i>. Hasselt, Belgium, 2006. <a href=\"https://doi.org/10.1007/978-3-540-70816-2_4\">https://doi.org/10.1007/978-3-540-70816-2_4</a>.",
"ama":"Schäfer R, Bleul S, Müller W. Dialog Modelling for Multiple Devices and Multiple Interaction Modalities. In: <i>Proceedings of the 5th International Workshop on Task Models and Diagrams for User Interface Design (TAMODIA’2006)</i>. ; 2006. doi:<a href=\"https://doi.org/10.1007/978-3-540-70816-2_4\">10.1007/978-3-540-70816-2_4</a>",
"apa":"Schäfer, R., Bleul, S., &#38; Müller, W. (2006). Dialog Modelling for Multiple Devices and Multiple Interaction Modalities. <i>Proceedings of the 5th International Workshop on Task Models and Diagrams for User Interface Design (TAMODIA’2006)</i>. <a href=\"https://doi.org/10.1007/978-3-540-70816-2_4\">https://doi.org/10.1007/978-3-540-70816-2_4</a>",
"bibtex":"@inproceedings{Schäfer_Bleul_Müller_2006, place={Hasselt, Belgium}, title={Dialog Modelling for Multiple Devices and Multiple Interaction Modalities}, DOI={<a href=\"https://doi.org/10.1007/978-3-540-70816-2_4\">10.1007/978-3-540-70816-2_4</a>}, booktitle={Proceedings of the 5th International Workshop on Task Models and Diagrams for User Interface Design (TAMODIA’2006)}, author={Schäfer, Robbie and Bleul, Steffen and Müller, Wolfgang}, year={2006} }",
"short":"R. Schäfer, S. Bleul, W. Müller, in: Proceedings of the 5th International Workshop on Task Models and Diagrams for User Interface Design (TAMODIA’2006), Hasselt, Belgium, 2006.",
"mla":"Schäfer, Robbie, et al. “Dialog Modelling for Multiple Devices and Multiple Interaction Modalities.” <i>Proceedings of the 5th International Workshop on Task Models and Diagrams for User Interface Design (TAMODIA’2006)</i>, 2006, doi:<a href=\"https://doi.org/10.1007/978-3-540-70816-2_4\">10.1007/978-3-540-70816-2_4</a>."}},
{"date_created":"2023-01-24T09:26:58Z",
"author":[{"last_name":"Schäfer","full_name":"Schäfer, Robbie","first_name":"Robbie"},{"full_name":"Bleul, Steffen","last_name":"Bleul","first_name":"Steffen"},{"first_name":"Wolfgang","id":"16243","full_name":"Müller, Wolfgang","last_name":"Müller"}],
"date_updated":"2023-01-24T09:27:03Z","title":"A Novel Dialog Model for the Design of Multimodal User Interfaces",
"citation":{"ama":"Schäfer R, Bleul S, Müller W. A Novel Dialog Model for the Design of Multimodal User Interfaces. In: <i>Proceedings of EHCI-DSVIS 2005</i>. Lecture Notes in Computer Science . ; 2004.",
"chicago":"Schäfer, Robbie, Steffen Bleul, and Wolfgang Müller. “A Novel Dialog Model for the Design of Multimodal User Interfaces.” In <i>Proceedings of EHCI-DSVIS 2005</i>. Lecture Notes in Computer Science . Tremsbüttel, Hamburg, 2004.",
"ieee":"R. Schäfer, S. Bleul, and W. Müller, “A Novel Dialog Model for the Design of Multimodal User Interfaces,” 2004.",
"apa":"Schäfer, R., Bleul, S., &#38; Müller, W. (2004). A Novel Dialog Model for the Design of Multimodal User Interfaces. <i>Proceedings of EHCI-DSVIS 2005</i>.",
"mla":"Schäfer, Robbie, et al. “A Novel Dialog Model for the Design of Multimodal User Interfaces.” <i>Proceedings of EHCI-DSVIS 2005</i>, 2004.",
"short":"R. Schäfer, S. Bleul, W. Müller, in: Proceedings of EHCI-DSVIS 2005, Tremsbüttel, Hamburg, 2004.",
"bibtex":"@inproceedings{Schäfer_Bleul_Müller_2004, place={Tremsbüttel, Hamburg}, series={Lecture Notes in Computer Science }, title={A Novel Dialog Model for the Design of Multimodal User Interfaces}, booktitle={Proceedings of EHCI-DSVIS 2005}, author={Schäfer, Robbie and Bleul, Steffen and Müller, Wolfgang}, year={2004}, collection={Lecture Notes in Computer Science } }"},
"year":"2004","place":"Tremsbüttel, Hamburg","series_title":"Lecture Notes in Computer Science","user_id":"5786","department":[{"_id":"672"}],"_id":"39350","language":[{"iso":"eng"}],
"keyword":["Multimodal User Interface","High Level Model","Multimodal User","High Level Approach","Dialog Model"],
"type":"conference","publication":"Proceedings of EHCI-DSVIS 2005","status":"public",
"abstract":[{"lang":"eng","text":"Variation in different mobile devices with different capabilities and interaction modalities as well as changing user context in nomadic applications, poses huge challenges to the design of user interfaces. To avoid multiple designs for each device or modality, it is almost a must to employ a model-based approach. In this short paper, we present a new dialog model for multimodal interaction together with an advanced control model, which can either be used for direct modeling by an interface designer or in conjunction with higher level models."}]}]
