@article{63053,
  author       = {{Hernández, Carlos and Rodriguez-Fernandez, Angel E. and Schäpermeier, Lennart and Cuate, Oliver and Trautmann, Heike and Schütze, Oliver}},
  journal      = {{IEEE Transactions on Evolutionary Computation}},
  keywords     = {{Optimization, Evolutionary computation, Hands, Proposals, Convergence, Computational efficiency, Artificial intelligence, Accuracy, Approximation algorithms, Aerospace electronics, Multi-objective optimization, evolutionary algorithms, nearly optimal solutions, multimodal optimization, archiving, continuation}},
  pages        = {{1--1}},
  title        = {{{An Evolutionary Approach for the Computation of ϵ-Locally Optimal Solutions for Multi-Objective Multimodal Optimization}}},
  doi          = {{10.1109/TEVC.2025.3637276}},
  year         = {{2025}},
}

@article{56221,
  author       = {{Rodriguez-Fernandez, Angel E. and Schäpermeier, Lennart and Hernández, Carlos and Kerschke, Pascal and Trautmann, Heike and Schütze, Oliver}},
  journal      = {{IEEE Transactions on Evolutionary Computation}},
  keywords     = {{Optimization, Evolutionary computation, Approximation algorithms, Benchmark testing, Vectors, Surveys, Pareto optimization, multi-objective optimization, evolutionary computation, multimodal optimization, local solutions}},
  pages        = {{1--1}},
  title        = {{{Finding ϵ-Locally Optimal Solutions for Multi-Objective Multimodal Optimization}}},
  doi          = {{10.1109/TEVC.2024.3458855}},
  year         = {{2024}},
}

@article{59888,
  abstract     = {{Everyday explanations are interactive processes with the aim to provide a less knowledgeable person with reasonable information about other people, objects, or events. Because explanations are interactive communicative processes, the topical structure of an explanation may vary dynamically depending on the immediate feedback of the explainee. In this paper, we analyse topical transitions in medical explanations organised by different physicians (explainers) related to different forms of multimodal behaviour of caregivers (explainees) attending an explanation about the procedures of an upcoming surgery of a child. The analyses reveal that explainees’ multimodal behaviour with gaze shifts (and particularly gaze aversion) can predict a transition from an elaborated topic to a new one, whereas explainees’ forms of multimodal behaviour with static gaze cannot be related to changes of the topical structure.}},
  author       = {{Lazarov, Stefan Teodorov and Biermeier, Kai and Grimminger, Angela}},
  issn         = {{1572-0381}},
  journal      = {{Interaction Studies}},
  keywords     = {{explanations, multimodal behaviour, elaborations, conditional probabilities}},
  number       = {{3}},
  pages        = {{257--280}},
  publisher    = {{John Benjamins}},
  title        = {{{Changes in the topical structure of explanations are related to explainees’ multimodal behaviour}}},
  doi          = {{10.1075/is.23033.laz}},
  volume       = {{25}},
  year         = {{2024}},
}

@article{46318,
  abstract     = {{Multi-objective (MO) optimization, i.e., the simultaneous optimization of multiple conflicting objectives, is gaining more and more attention in various research areas, such as evolutionary computation, machine learning (e.g., (hyper-)parameter optimization), or logistics (e.g., vehicle routing). Many works in this domain mention the structural problem property of multimodality as a challenge from two classical perspectives: (1) finding all globally optimal solution sets, and (2) avoiding getting trapped in local optima. Interestingly, these streams seem to transfer many traditional concepts of single-objective (SO) optimization into claims, assumptions, or even terminology regarding the MO domain, but mostly neglect the understanding of the structural properties as well as the algorithmic search behavior on a problem’s landscape. However, some recent works counteract this trend by investigating the fundamentals and characteristics of MO problems using new visualization techniques and gaining surprising insights. Using these visual insights, this work proposes a step towards a unified terminology to capture multimodality and locality in a broader way than it is usually done. This enables us to investigate current research activities in multimodal continuous MO optimization and to highlight new implications and promising research directions for the design of benchmark suites, the discovery of MO landscape features, the development of new MO (or even SO) optimization algorithms, and performance indicators. For all these topics, we provide a review of ideas and methods but also an outlook on future challenges, research potential and perspectives that result from recent developments.}},
  author       = {{Grimme, Christian and Kerschke, Pascal and Aspar, Pelin and Trautmann, Heike and Preuss, Mike and Deutz, André H. and Wang, Hao and Emmerich, Michael}},
  issn         = {{0305-0548}},
  journal      = {{Computers \& Operations Research}},
  keywords     = {{Multimodal optimization, Multi-objective continuous optimization, Landscape analysis, Visualization, Benchmarking, Theory, Algorithms}},
  pages        = {{105489}},
  title        = {{{Peeking beyond peaks: Challenges and research potentials of continuous multimodal multi-objective optimization}}},
  doi          = {{10.1016/j.cor.2021.105489}},
  volume       = {{136}},
  year         = {{2021}},
}

@article{17204,
  abstract     = {{In a longitudinal naturalistic study, we observed German mothers interacting with their infants when they were 3 and 6 months old. Pursuing the idea that infants’ attention is socialized in everyday interactions, we explored whether eye contact is reinforced selectively by behavioral modification in the input provided to infants. Applying a microanalytical approach focusing on the sequential organization of interaction, we explored how the mother draws the infant’s attention to herself and how she tries to maintain attention when the infant is looking at her. Results showed that eye contact is reinforced by specific infant-directed practices: interrogatives and conversational openings, multimodal stimulation, repetition, and imitation. In addition, these practices are contingent on the infant’s own behavior. By comparing the two data points (3 and 6 months), we showed how the education of attention evolves hand-in-hand with the developing capacities of the infant.}},
  author       = {{Nomikou, Iris and Rohlfing, Katharina and Szufnarowska, Joanna}},
  issn         = {{1572-0381}},
  journal      = {{Interaction Studies}},
  keywords     = {{interactional adaptation, multimodal input, social learning, ecology of attention, eye contact}},
  number       = {{2}},
  pages        = {{240--267}},
  publisher    = {{John Benjamins Publishing Company}},
  title        = {{{Educating attention: recruiting, maintaining, and framing eye contact in early natural mother-infant interactions}}},
  doi          = {{10.1075/is.14.2.05nom}},
  volume       = {{14}},
  year         = {{2013}},
}

@article{17225,
  abstract     = {{How is communicative gesture behavior in robots perceived by humans? Although gesture is crucial in social interaction, this research question is still largely unexplored in the field of social robotics. Thus, the main objective of the present work is to investigate how gestural machine behaviors can be used to design more natural communication in social robots. The chosen approach is twofold. Firstly, the technical challenges encountered when implementing a speech-gesture generation model on a robotic platform are tackled. We present a framework that enables the humanoid robot to flexibly produce synthetic speech and co-verbal hand and arm gestures at run-time, while not being limited to a predefined repertoire of motor actions. Secondly, the achieved flexibility in robot gesture is exploited in controlled experiments. To gain a deeper understanding of how communicative robot gesture might impact and shape human perception and evaluation of human-robot interaction, we conducted a between-subjects experimental study using the humanoid robot in a joint task scenario. We manipulated the non-verbal behaviors of the robot in three experimental conditions, so that it would refer to objects by utilizing either (1) unimodal (i.e., speech only) utterances, (2) congruent multimodal (i.e., semantically matching speech and gesture) or (3) incongruent multimodal (i.e., semantically non-matching speech and gesture) utterances. Our findings reveal that the robot is evaluated more positively when non-verbal behaviors such as hand and arm gestures are displayed along with speech, even if they do not semantically match the spoken utterance.}},
  author       = {{Salem, Maha and Kopp, Stefan and Wachsmuth, Ipke and Rohlfing, Katharina and Joublin, Frank}},
  issn         = {{1875-4805}},
  journal      = {{International Journal of Social Robotics, Special Issue on Expectations, Intentions, and Actions}},
  keywords     = {{Social Human-Robot Interaction, Multimodal Interaction and Conversational Skills, Robot Companions and Social Robots, Non-verbal Cues and Expressiveness}},
  number       = {{2}},
  pages        = {{201--217}},
  publisher    = {{Springer Science + Business Media}},
  title        = {{{Generation and evaluation of communicative robot gesture}}},
  doi          = {{10.1007/s12369-011-0124-9}},
  volume       = {{4}},
  year         = {{2012}},
}

@article{17246,
  author       = {{Nomikou, Iris and Rohlfing, Katharina}},
  issn         = {{1943-0612}},
  journal      = {{IEEE Transactions on Autonomous Mental Development}},
  keywords     = {{acoustic packaging, mother-child interaction, social learning, multimodal grounding in input, ecology of interactions, synchrony}},
  number       = {{2}},
  pages        = {{113--128}},
  publisher    = {{Institute of Electrical \& Electronics Engineers (IEEE)}},
  title        = {{{Language Does Something: Body Action and Language in Maternal Input to Three-Month-Olds}}},
  doi          = {{10.1109/TAMD.2011.2140113}},
  volume       = {{3}},
  year         = {{2011}},
}

@inproceedings{17430,
  abstract     = {{Previous work has shown that gestural behaviors affect anthropomorphic inferences about artificial communicators such as virtual agents. In an experiment with a humanoid robot, we investigated to what extent gesture would affect anthropomorphic inferences about the robot. Particularly, we examined the effects of the robot's hand and arm gestures on the attribution of typically human traits, likability of the robot, shared reality, and future contact intentions after interacting with the robot. For this, we manipulated the non-verbal behaviors of the humanoid robot in three experimental conditions: (1) no gesture, (2) congruent gesture, and (3) incongruent gesture. We hypothesized higher ratings on all dependent measures in the two gesture (vs. no gesture) conditions. The results confirm our predictions: when the robot used gestures during interaction, it was anthropomorphized more, participants perceived it as more likable, reported greater shared reality with it, and showed greater future contact intentions than when the robot gave instructions without using gestures. Surprisingly, this effect was particularly pronounced when the robot's gestures were partly incongruent with speech. These findings show that communicative non-verbal behaviors in robotic systems affect both anthropomorphic perceptions and the mental models humans form of a humanoid robot during interaction.}},
  author       = {{Salem, Maha and Eyssel, Friederike Anne and Rohlfing, Katharina and Kopp, Stefan and Joublin, F.}},
  booktitle    = {{Social Robotics}},
  editor       = {{Mutlu, B. and Bartneck, C. and Ham, J. and Evers, V. and Kanda, T.}},
  isbn         = {{978-3-642-25503-8}},
  keywords     = {{Multimodal Interaction and Conversational Skills, Anthropomorphism, Non-verbal Cues and Expressiveness}},
  pages        = {{31--41}},
  publisher    = {{Springer Science + Business Media}},
  title        = {{{Effects of gesture on the perception of psychological anthropomorphism: A case study with a humanoid robot}}},
  doi          = {{10.1007/978-3-642-25504-5_4}},
  volume       = {{7072}},
  year         = {{2011}},
}

@inproceedings{17244,
  abstract     = {{Robots interacting with humans need to understand actions and make use of language in social interactions. Research on infant development has shown that language helps the learner to structure visual observations of action. This acoustic information, typically in the form of narration, overlaps with action sequences and provides infants with a bottom-up guide to find structure within them. This concept has been introduced as acoustic packaging by Hirsh-Pasek and Golinkoff. We developed and integrated a prominence detection module in our acoustic packaging system to detect semantically relevant information linguistically highlighted by the tutor. Evaluation results on speech data from adult-infant interactions show a significant agreement with human raters. Furthermore, a first approach based on acoustic packages which uses the prominence detection results to generate acoustic feedback is presented.}},
  author       = {{Schillingmann, Lars and Wagner, Petra and Munier, Christian and Wrede, Britta and Rohlfing, Katharina}},
  booktitle    = {{Interspeech 2011 (12th Annual Conference of the International Speech Communication Association)}},
  keywords     = {{Feedback, Human Robot Interaction, Prominence, Multimodal Action Segmentation}},
  pages        = {{3105--3108}},
  title        = {{{Using Prominence Detection to Generate Acoustic Feedback in Tutoring Scenarios}}},
  year         = {{2011}},
}

@inproceedings{17245,
  author       = {{Schillingmann, Lars and Wagner, Petra and Munier, Christian and Wrede, Britta and Rohlfing, Katharina}},
  issn         = {{1662-5188}},
  keywords     = {{Prominence, Multimodal Action Segmentation, Feedback, Color Saliency, Human Robot Interaction}},
  title        = {{{Acoustic Packaging and the Learning of Words}}},
  doi          = {{10.3389/conf.fncom.2011.52.00020}},
  year         = {{2011}},
}

@article{11892,
  abstract     = {{For an environment to be perceived as being smart, contextual information has to be gathered to adapt the system's behavior and its interface towards the user. Being a rich source of context information, speech can be acquired unobtrusively by microphone arrays and then processed to extract information about the user and his environment. In this paper, a system for joint temporal segmentation, speaker localization, and identification is presented, which is supported by face identification from video data obtained from a steerable camera. Special attention is paid to latency aspects and online processing capabilities, as they are important for the application under investigation, namely ambient communication. This term describes the vision of terminal-less, session-less and multi-modal telecommunication with remote partners, where the user can move freely within his home while the communication follows him. The speaker diarization serves as a context source, which has been integrated in a service-oriented middleware architecture and provided to the application to select the most appropriate I/O device and to steer the camera towards the speaker during ambient communication.}},
  author       = {{Schmalenstroeer, Joerg and Haeb-Umbach, Reinhold}},
  journal      = {{IEEE Journal of Selected Topics in Signal Processing}},
  keywords     = {{audio streaming, audio visual data streaming, context information speech, face identification, face recognition, image segmentation, middleware, multimodal telecommunication, online diarization, service oriented middleware architecture, sessionless telecommunication, software architecture, speaker identification, speaker localization, speaker recognition, steerable camera, telecommunication computing, temporal segmentation, terminal-less telecommunication, video streaming}},
  number       = {{5}},
  pages        = {{845--856}},
  title        = {{{Online Diarization of Streaming Audio-Visual Data for Smart Environments}}},
  doi          = {{10.1109/JSTSP.2010.2050519}},
  volume       = {{4}},
  year         = {{2010}},
}

@article{17256,
  author       = {{Grimminger, Angela and Rohlfing, Katharina and Stenneken, Prisca}},
  issn         = {{1569-9773}},
  journal      = {{Gesture}},
  keywords     = {{task-oriented dialogue, Late Talker, maternal multimodal input, gestural motherese}},
  number       = {{2}},
  pages        = {{251--278}},
  publisher    = {{John Benjamins Publishing Company}},
  title        = {{{Children's lexical skills and task demands affect gestural behavior in mothers of late-talking children and children with typical language development}}},
  doi          = {{10.1075/gest.10.2-3.07gri}},
  volume       = {{10}},
  year         = {{2010}},
}

@inproceedings{17259,
  abstract     = {{Learning is a social endeavor, in which the learner generally receives support from his/her social partner(s). In developmental research, even though tutors’/adults’ behavior modifications in their speech, gestures, and motions have been extensively studied, studies barely consider the recipient’s (i.e., the child’s) perspective in the analysis of the adult’s presentation. In addition, the variability in parental behavior, i.e., the fact that not every parent modifies her/his behavior in the same way, has received less fine-grained analysis. In contrast, in this paper, we assume an interactional perspective investigating the loop between the tutor’s and the learner’s actions. With this approach, we aim both at discovering the levels and features of variability and at achieving a better understanding of how they come about within the course of the interaction. For our analysis, we used a combination of (1) qualitative investigation derived from ethnomethodological Conversation Analysis (CA), (2) semi-automatic computational 2D hand tracking and (3) a mathematically based visualization of the data. Our analysis reveals that tutors not only shape their demonstrations differently with regard to the intended recipient per se (adult-directed vs. child-directed), but most importantly that the learner’s feedback during the presentation is consequential for the concrete ways in which the presentation is carried out.}},
  author       = {{Pitsch, Karola and Vollmer, Anna-Lisa and Fritsch, Jannik and Wrede, Britta and Rohlfing, Katharina and Sagerer, Gerhard}},
  booktitle    = {{Gesture and Speech in Interaction}},
  keywords     = {{gaze, gesture, Multimodal, adult-child interaction}},
  title        = {{{On the loop of action modification and the recipient's gaze in adult-child interaction}}},
  year         = {{2009}},
}

@inproceedings{17272,
  abstract     = {{In developmental research, tutoring behavior has been identified as scaffolding infants' learning processes. It has been defined in terms of child-directed speech (Motherese), child-directed motion (Motionese), and contingency. In the field of developmental robotics, research often assumes that in human-robot interaction (HRI), robots are treated similarly to infants, because their immature cognitive capabilities benefit from this behavior. However, according to our knowledge, it has barely been studied whether this is true and how exactly humans alter their behavior towards a robotic interaction partner. In this paper, we present results concerning the acceptance of a robotic agent in a social learning scenario obtained via comparison to adults and 8-11-month-old infants in equal conditions. These results constitute an important empirical basis for making use of tutoring behavior in social robotics. In our study, we performed a detailed multimodal analysis of HRI in a tutoring situation using the example of a robot simulation equipped with a bottom-up saliency-based attention model. Our results reveal significant differences in hand movement velocity, motion pauses, range of motion, and eye gaze, suggesting that, for example, adults decrease their hand movement velocity in an Adult-Child Interaction (ACI) as opposed to an Adult-Adult Interaction (AAI), and that this decrease is even higher in the Adult-Robot Interaction (ARI). We also found important differences between ACI and ARI in how the behavior is modified over time as the interaction unfolds. These findings indicate the necessity of integrating top-down feedback structures into a bottom-up system for robots to be fully accepted as interaction partners.}},
  author       = {{Vollmer, Anna-Lisa and Lohan, Katrin Solveig and Fischer, Kerstin and Nagai, Yukie and Pitsch, Karola and Fritsch, Jannik and Rohlfing, Katharina and Wrede, Britta}},
  booktitle    = {{Development and Learning, 2009. ICDL 2009. IEEE 8th International Conference on Development and Learning}},
  keywords     = {{robot simulation, hand movement velocity, robotic interaction partner, robotic agent, robot-directed interaction, multimodal analysis, Motionese, Motherese, intelligent tutoring systems, immature cognitive capability, human computer interaction, eye gaze, child-directed speech, child-directed motion, bottom-up system, bottom-up saliency-based attention model, adult-robot interaction, adult-child interaction, adult-adult interaction, human-robot interaction, action learning, social learning scenario, social robotics, software agents, top-down feedback structures, tutoring behavior}},
  pages        = {{1--6}},
  publisher    = {{IEEE}},
  title        = {{{People modify their tutoring behavior in robot-directed interaction for action learning}}},
  doi          = {{10.1109/DEVLRN.2009.5175516}},
  year         = {{2009}},
}

@inproceedings{17268,
  author       = {{Schillingmann, Lars and Wrede, Britta and Rohlfing, Katharina}},
  booktitle    = {{International Conference on Development and Learning (ICDL 2009)}},
  isbn         = {{978-1-4244-4117-4}},
  keywords     = {{Acoustic Packaging, multimodal}},
  publisher    = {{IEEE}},
  title        = {{{Towards a Computational Model of Acoustic Packaging}}},
  doi          = {{10.1109/devlrn.2009.5175523}},
  year         = {{2009}},
}

@inproceedings{38543,
  abstract     = {{Today, a large variety of mobile interaction devices, such as PDAs and mobile phones, necessitates the development of a wide range of user interfaces for each platform. The complexity grows even further when multiple interaction devices are used to perform the same task and when different modalities have to be supported. We introduce a new dialog model for the abstraction of concrete user interfaces with a separate advanced control layer for the integration of different modalities. In this context, we present the Dialog and Interface Specification Language (DISL), which comes with a proof-of-concept implementation.}},
  author       = {{Schäfer, Robbie and Bleul, Steffen and Müller, Wolfgang}},
  booktitle    = {{Proceedings of the 5th International Workshop on Task Models and Diagrams for User Interface Design (TAMODIA'2006)}},
  isbn         = {{978-3-540-70815-5}},
  keywords     = {{User Interface, Interaction Manager, Output Device, Multimodal Interface, Interaction Object}},
  title        = {{{Dialog Modelling for Multiple Devices and Multiple Interaction Modalities}}},
  doi          = {{10.1007/978-3-540-70816-2_4}},
  year         = {{2006}},
}

@inproceedings{39350,
  abstract     = {{Variation in mobile devices, with their different capabilities and interaction modalities, as well as changing user context in nomadic applications, poses huge challenges to the design of user interfaces. To avoid multiple designs for each device or modality, it is almost a must to employ a model-based approach. In this short paper, we present a new dialog model for multimodal interaction together with an advanced control model, which can either be used for direct modeling by an interface designer or in conjunction with higher-level models.}},
  author       = {{Schäfer, Robbie and Bleul, Steffen and Müller, Wolfgang}},
  booktitle    = {{Proceedings of EHCI-DSVIS 2005}},
  keywords     = {{Multimodal User Interface, High Level Model, Multimodal User, High Level Approach, Dialog Model}},
  title        = {{{A Novel Dialog Model for the Design of Multimodal User Interfaces}}},
  year         = {{2004}},
}
