@inproceedings{4486,
  author       = {{Gebhardt, Anja and Jenert, Tobias}},
  keywords     = {{learning culture, social interaction}},
  location     = {{Belgrade, Serbia}},
  title        = {{{How to Assess and Study the Cultural Dimension of Social Interactions in Higher Education Institutions (HEI)}}},
  year         = {{2012}},
}

@article{17225,
  abstract     = {{How is communicative gesture behavior in robots perceived by humans? Although gesture is crucial in social interaction, this research question is still largely unexplored in the field of social robotics. Thus, the main objective of the present work is to investigate how gestural machine behaviors can be used to design more natural communication in social robots. The chosen approach is twofold. Firstly, the technical challenges encountered when implementing a speech-gesture generation model on a robotic platform are tackled. We present a framework that enables the humanoid robot to flexibly produce synthetic speech and co-verbal hand and arm gestures at run-time, while not being limited to a predefined repertoire of motor actions. Secondly, the achieved flexibility in robot gesture is exploited in controlled experiments. To gain a deeper understanding of how communicative robot gesture might impact and shape human perception and evaluation of human-robot interaction, we conducted a between-subjects experimental study using the humanoid robot in a joint task scenario. We manipulated the non-verbal behaviors of the robot in three experimental conditions, so that it would refer to objects by utilizing either (1) unimodal (i.e., speech only) utterances, (2) congruent multimodal (i.e., semantically matching speech and gesture) or (3) incongruent multimodal (i.e., semantically non-matching speech and gesture) utterances. Our findings reveal that the robot is evaluated more positively when non-verbal behaviors such as hand and arm gestures are displayed along with speech, even if they do not semantically match the spoken utterance.}},
  author       = {{Salem, Maha and Kopp, Stefan and Wachsmuth, Ipke and Rohlfing, Katharina and Joublin, Frank}},
  issn         = {{1875-4805}},
  journal      = {{International Journal of Social Robotics, Special Issue on Expectations, Intentions, and Actions}},
  keywords     = {{Social Human-Robot Interaction, Multimodal Interaction and Conversational Skills, Robot Companions and Social Robots, Non-verbal Cues and Expressiveness}},
  number       = {{2}},
  pages        = {{201--217}},
  publisher    = {{Springer Science + Business Media}},
  title        = {{{Generation and evaluation of communicative robot gesture}}},
  doi          = {{10.1007/s12369-011-0124-9}},
  volume       = {{4}},
  year         = {{2012}},
}

@inproceedings{1120,
  abstract     = {{SCM is a simple, modular and flexible system for web monitoring and customer interaction management. In our view, its main advantages are the following: It is completely web-based. It combines all technologies, data, software agents and human agents involved in the monitoring and customer interaction process. It can be used for messages written in any natural language. Although the prototype of SCM is designed for classifying and processing messages about mobile-phone related problems in social networks, SCM can easily be adapted to other text types such as discussion board posts, blogs or emails. Unlike comparable systems, SCM uses linguistic technologies to classify messages and recognize paraphrases of product names. For two reasons, product name paraphrasing plays a major role in SCM: First, product names typically have many, sometimes hundreds or thousands of intralingual paraphrases. Second, product names have interlingual paraphrases: The same products are often called or spelt differently in different countries and/or languages. By mapping product name variants to an international canonical form, SCM allows for answering questions like: Which statements are made about this mobile phone in which languages/in which social networks/in which countries/...? The SCM product name paraphrasing engine is designed in such a way that standard variants are assigned automatically, regular variants are assigned semiautomatically and idiosyncratic variants can be added manually. With this and similar features we try to realize our philosophy of simplicity, modularity and flexibility: Whatever can be done automatically is done automatically. But manual intervention is always possible and easy, and it does not conflict in any way with the automatic functions of SCM.}},
  author       = {{Schuster, Jörg and Lee, Yeong Su and Kobothanassi, Despina and Bargel, Matthias and Geierhos, Michaela}},
  booktitle    = {{International Conference on Information Society (i-Society 2011)}},
  isbn         = {{978-1-61284-148-9}},
  keywords     = {{Social Media Business Integration, Contact Center Application Support, Monitoring Social Conversations, Social Customer Interaction Management, Monitoring, Software Agents}},
  location     = {{London, UK}},
  pages        = {{153--158}},
  publisher    = {{IEEE}},
  title        = {{{SCM - A Simple, Modular and Flexible Customer Interaction Management System}}},
  year         = {{2011}},
}

@article{1125,
  abstract     = {{Since customers first share their problems with a social networking community before directly addressing a company, social networking sites such as Facebook, Twitter, MySpace or Foursquare will be the interface between customer and company. For this reason, it is assumed that social networks will evolve into a common communication channel, not only between individuals but also between customers and companies. However, social networking has not yet been integrated into customer interaction management (CIM) tools. In general, a CIM application is used by the agents in a contact centre while communicating with the customers. Such systems handle communication across multiple channels, such as e-mail, telephone, instant messaging, letter, etc. We therefore integrate social networking into CIM applications by adding another communication channel. This allows the company to follow general trends in customer opinions on the Internet and to record two-sided communication for customer service management, while the company's response is delivered through the customer's preferred social networking site.}},
  author       = {{Geierhos, Michaela}},
  issn         = {{1798-2340}},
  journal      = {{Journal of Advances in Information Technology}},
  keywords     = {{Social Media Business Integration, Multichannel Customer Interaction Management, Contact Centre Application Support}},
  number       = {{4}},
  pages        = {{222--233}},
  publisher    = {{Engineering and Technology Publishing (ETPub)}},
  title        = {{{Customer Interaction 2.0: Adopting Social Media as Customer Service Channel}}},
  doi          = {{10.4304/jait.2.4.222-233}},
  volume       = {{2}},
  year         = {{2011}},
}

@article{17233,
  abstract     = {{It has been proposed that the design of robots might benefit from interactions that are similar to caregiver–child interactions, which are tailored to children’s respective capacities to a high degree. However, so far little is known about how people adapt their tutoring behaviour to robots and whether robots can evoke input that is similar to child-directed interaction. The paper presents detailed analyses of speakers’ linguistic and non-linguistic behaviour, such as action demonstration, in two comparable situations: In one experiment, parents described and explained to their nonverbal infants the use of certain everyday objects; in the other experiment, participants tutored a simulated robot on the same objects. The results, which show considerable differences between the two situations on almost all measures, are discussed in the light of the computer-as-social-actor paradigm and the register hypothesis.}},
  author       = {{Fischer, Kerstin and Foth, Kilian and Rohlfing, Katharina and Wrede, Britta}},
  issn         = {{1572-0381}},
  journal      = {{Interaction Studies}},
  keywords     = {{human–robot interaction (HRI), social communication, register theory, motionese, robotese, child-directed speech (CDS), motherese, mindless transfer, computers-as-social-actors}},
  number       = {{1}},
  pages        = {{134--161}},
  publisher    = {{John Benjamins Publishing Company}},
  title        = {{{Mindful tutors: Linguistic choice and action demonstration in speech to infants and a simulated robot}}},
  doi          = {{10.1075/is.12.1.06fis}},
  volume       = {{12}},
  year         = {{2011}},
}

@article{17246,
  author       = {{Nomikou, Iris and Rohlfing, Katharina}},
  issn         = {{1943-0612}},
  journal      = {{IEEE Transactions on Autonomous Mental Development}},
  keywords     = {{acoustic packaging, mother-child interaction, social learning, multimodal grounding in input, ecology of interactions, synchrony}},
  number       = {{2}},
  pages        = {{113--128}},
  publisher    = {{Institute of Electrical & Electronics Engineers (IEEE)}},
  title        = {{{Language Does Something: Body Action and Language in Maternal Input to Three-Month-Olds}}},
  doi          = {{10.1109/TAMD.2011.2140113}},
  volume       = {{3}},
  year         = {{2011}},
}

@inproceedings{17430,
  abstract     = {{Previous work has shown that gestural behaviors affect anthropomorphic inferences about artificial communicators such as virtual agents. In an experiment with a humanoid robot, we investigated to what extent gesture would affect anthropomorphic inferences about the robot. Particularly, we examined the effects of the robot's hand and arm gestures on the attribution of typically human traits, likability of the robot, shared reality, and future contact intentions after interacting with the robot. For this, we manipulated the non-verbal behaviors of the humanoid robot in three experimental conditions: (1) no gesture, (2) congruent gesture, and (3) incongruent gesture. We hypothesized higher ratings on all dependent measures in the two gesture (vs. no gesture) conditions. The results confirm our predictions: when the robot used gestures during interaction, it was anthropomorphized more, participants perceived it as more likable, reported greater shared reality with it, and showed greater future contact intentions than when the robot gave instructions without using gestures. Surprisingly, this effect was particularly pronounced when the robot's gestures were partly incongruent with speech. These findings show that communicative non-verbal behaviors in robotic systems affect both anthropomorphic perceptions and the mental models humans form of a humanoid robot during interaction.}},
  author       = {{Salem, Maha and Eyssel, Friederike Anne and Rohlfing, Katharina and Kopp, Stefan and Joublin, Frank}},
  booktitle    = {{Social Robotics}},
  editor       = {{Mutlu, B. and Bartneck, C. and Ham, J. and Evers, V. and Kanda, T.}},
  isbn         = {{978-3-642-25503-8}},
  keywords     = {{Multimodal Interaction and Conversational Skills, Anthropomorphism, Non-verbal Cues and Expressiveness}},
  pages        = {{31--41}},
  publisher    = {{Springer Science + Business Media}},
  title        = {{{Effects of gesture on the perception of psychological anthropomorphism: A case study with a humanoid robot}}},
  doi          = {{10.1007/978-3-642-25504-5_4}},
  volume       = {{7072}},
  year         = {{2011}},
}

@inproceedings{17244,
  abstract     = {{Robots interacting with humans need to understand actions and make use of language in social interactions. Research on infant development has shown that language helps the learner to structure visual observations of action. This acoustic information, typically in the form of narration, overlaps with action sequences and provides infants with a bottom-up guide to find structure within them. This concept has been introduced as acoustic packaging by Hirsh-Pasek and Golinkoff. We developed and integrated a prominence detection module in our acoustic packaging system to detect semantically relevant information linguistically highlighted by the tutor. Evaluation results on speech data from adult-infant interactions show a significant agreement with human raters. Furthermore, a first approach based on acoustic packages which uses the prominence detection results to generate acoustic feedback is presented.}},
  author       = {{Schillingmann, Lars and Wagner, Petra and Munier, Christian and Wrede, Britta and Rohlfing, Katharina}},
  booktitle    = {{Interspeech 2011 (12th Annual Conference of the International Speech Communication Association)}},
  keywords     = {{Feedback, Human Robot Interaction, Prominence, Multimodal Action Segmentation}},
  pages        = {{3105--3108}},
  title        = {{{Using Prominence Detection to Generate Acoustic Feedback in Tutoring Scenarios}}},
  year         = {{2011}},
}

@inproceedings{17245,
  author       = {{Schillingmann, Lars and Wagner, Petra and Munier, Christian and Wrede, Britta and Rohlfing, Katharina}},
  issn         = {{1662-5188}},
  keywords     = {{Prominence, Multimodal Action Segmentation, Feedback, Color Saliency, Human Robot Interaction}},
  title        = {{{Acoustic Packaging and the Learning of Words}}},
  doi          = {{10.3389/conf.fncom.2011.52.00020}},
  year         = {{2011}},
}

@inproceedings{17253,
  author       = {{Vollmer, Anna-Lisa and Pitsch, Karola and Lohan, Katrin Solveig and Fritsch, Jannik and Rohlfing, Katharina and Wrede, Britta}},
  booktitle    = {{2010 IEEE 9th International Conference on Development and Learning (ICDL)}},
  keywords     = {{tutoring interaction, social interaction, video signal processing, robot systems, paediatrics, neurophysiology, Learning, infant, feedback, biology computing, cognitive capabilities, cognition, children}},
  pages        = {{76--81}},
  title        = {{{Developing feedback: How children of different age contribute to a tutoring interaction with adults}}},
  year         = {{2010}},
}

@inproceedings{17259,
  abstract     = {{Learning is a social endeavor, in which the learner generally receives support from his/her social partner(s). In developmental research, even though tutors’/adults’ behavior modifications in their speech, gestures and motions have been extensively studied, studies barely consider the recipient’s (i.e. the child’s) perspective in the analysis of the adult’s presentation. In addition, the variability in parental behavior, i.e. the fact that not every parent modifies her/his behavior in the same way, has received less fine-grained analysis. In contrast, in this paper, we assume an interactional perspective investigating the loop between the tutor’s and the learner’s actions. With this approach, we aim both at discovering the levels and features of variability and at achieving a better understanding of how they come about within the course of the interaction. For our analysis, we used a combination of (1) qualitative investigation derived from ethnomethodological Conversation Analysis (CA), (2) semi-automatic computational 2D hand tracking and (3) a mathematically based visualization of the data. Our analysis reveals that tutors not only shape their demonstrations differently with regard to the intended recipient per se (adult-directed vs. child-directed), but most importantly that the learner’s feedback during the presentation is consequential for the concrete ways in which the presentation is carried out.}},
  author       = {{Pitsch, Karola and Vollmer, Anna-Lisa and Fritsch, Jannik and Wrede, Britta and Rohlfing, Katharina and Sagerer, Gerhard}},
  booktitle    = {{Gesture and Speech in Interaction}},
  keywords     = {{gaze, gesture, Multimodal, adult-child interaction}},
  title        = {{{On the loop of action modification and the recipient's gaze in adult-child interaction}}},
  year         = {{2009}},
}

@inproceedings{17272,
  abstract     = {{In developmental research, tutoring behavior has been identified as scaffolding infants' learning processes. It has been defined in terms of child-directed speech (Motherese), child-directed motion (Motionese), and contingency. In the field of developmental robotics, research often assumes that in human-robot interaction (HRI), robots are treated similarly to infants, because their immature cognitive capabilities benefit from this behavior. However, to our knowledge, it has barely been studied whether this is true and how exactly humans alter their behavior towards a robotic interaction partner. In this paper, we present results concerning the acceptance of a robotic agent in a social learning scenario, obtained via comparison to adults and 8-11-month-old infants under equal conditions. These results constitute an important empirical basis for making use of tutoring behavior in social robotics. In our study, we performed a detailed multimodal analysis of HRI in a tutoring situation using the example of a robot simulation equipped with a bottom-up saliency-based attention model. Our results reveal significant differences in hand movement velocity, motion pauses, range of motion, and eye gaze, suggesting that, for example, adults decrease their hand movement velocity in an Adult-Child Interaction (ACI) as opposed to an Adult-Adult Interaction (AAI), and this decrease is even greater in the Adult-Robot Interaction (ARI). We also found important differences between ACI and ARI in how the behavior is modified over time as the interaction unfolds. These findings indicate the necessity of integrating top-down feedback structures into a bottom-up system for robots to be fully accepted as interaction partners.}},
  author       = {{Vollmer, Anna-Lisa and Lohan, Katrin Solveig and Fischer, Kerstin and Nagai, Yukie and Pitsch, Karola and Fritsch, Jannik and Rohlfing, Katharina and Wrede, Britta}},
  booktitle    = {{2009 IEEE 8th International Conference on Development and Learning (ICDL)}},
  keywords     = {{robot simulation, hand movement velocity, robotic interaction partner, robotic agent, robot-directed interaction, multimodal analysis, Motionese, Motherese, intelligent tutoring systems, immature cognitive capability, human computer interaction, eye gaze, child-directed speech, child-directed motion, bottom-up system, bottom-up saliency-based attention model, adult-robot interaction, adult-child interaction, adult-adult interaction, human-robot interaction, action learning, social learning scenario, social robotics, software agents, top-down feedback structures, tutoring behavior}},
  pages        = {{1--6}},
  publisher    = {{IEEE}},
  title        = {{{People modify their tutoring behavior in robot-directed interaction for action learning}}},
  doi          = {{10.1109/DEVLRN.2009.5175516}},
  year         = {{2009}},
}

@inproceedings{17267,
  author       = {{Lohse, Manja and Hanheide, Marc and Rohlfing, Katharina and Sagerer, Gerhard}},
  booktitle    = {{Proceedings of the 4th ACM/IEEE international conference on Human robot interaction - HRI '09}},
  keywords     = {{SInA, human robot interaction, BIRON}},
  pages        = {{93--100}},
  title        = {{{Systemic interaction analysis (SInA) in HRI}}},
  doi          = {{10.1145/1514095.1514114}},
  year         = {{2009}},
}

@article{34564,
  abstract     = {{To provide user interfaces for a rich set of devices and interaction modalities, we follow a model-based development methodology. We devised an architecture which deploys user interfaces specified as dialogue models with abstract interaction objects and allows context-based adaptations by means of an external transcoding process. To validate the applicability of this methodology for developing usable multimodal multi-device systems, we present two case studies based on proof-of-concept implementations and assess them against a large set of established design principles and different types of modality cooperation.}},
  author       = {{Schäfer, Robbie and Müller, Wolfgang}},
  journal      = {{Journal on Multimodal User Interfaces}},
  keywords     = {{Interaction architecture, Abstract interaction objects, Dialogue model, Transformations, Multimodality, Multi-device, Design principles}},
  number       = {{1}},
  pages        = {{25--41}},
  publisher    = {{Springer-Verlag}},
  title        = {{{Assessment of a Multimodal Interaction and Rendering System against Established Design Principles}}},
  doi          = {{10.1007/s12193-008-0003-3}},
  volume       = {{2}},
  year         = {{2008}},
}

@inproceedings{17278,
  abstract     = {{This paper investigates the influence of feedback provided by an autonomous robot (BIRON) on users’ discursive behavior. A user study is described during which users show objects to the robot. The results of the experiment indicate that the robot’s verbal feedback utterances cause the humans to adapt their own way of speaking. The changes in users’ verbal behavior are due to their beliefs about the robot’s knowledge and abilities. In this paper they are identified and grouped. Moreover, the data implies variations in user behavior regarding gestures. Unlike with speech, the robot was not able to give feedback with gestures. Due to this lack of feedback, users did not seem to have a consistent mental representation of the robot’s abilities to recognize gestures. As a result, changes between different gestures are interpreted as unconscious variations accompanying speech.}},
  author       = {{Lohse, Manja and Rohlfing, Katharina and Wrede, Britta and Sagerer, Gerhard}},
  booktitle    = {{2008 IEEE International Conference on Robotics and Automation (ICRA)}},
  issn         = {{1050-4729}},
  keywords     = {{discursive behavior, autonomous robot, BIRON, man-machine systems, robot abilities, robot knowledge, user gestures, robot verbal feedback utterance, speech processing, user verbal behavior, service robots, human-robot interaction, human computer interaction, gesture recognition}},
  pages        = {{3481--3486}},
  title        = {{{“Try something else!” — When users change their discursive behavior in human-robot interaction}}},
  doi          = {{10.1109/ROBOT.2008.4543743}},
  year         = {{2008}},
}

@article{64041,
  abstract     = {{Three cis-dioxovanadium(V) complexes with similar N-salicylidenehydrazide ligands modeling hydrogen bonding interactions of vanadate relevant for vanadium haloperoxidases are studied by 51V solid-state NMR spectroscopy. Their parameters describing the quadrupolar and chemical shift anisotropy interactions (quadrupolar coupling constant C_Q, asymmetry of the quadrupolar tensor η_Q, isotropic chemical shift δ_iso, chemical shift anisotropy δ_σ, asymmetry of the chemical shift tensor η_σ, and the Euler angles α, β and γ) are determined both experimentally and theoretically using DFT methods. A comparative study of different methods to determine the NMR parameters by numerical simulation of the spectra is presented. Detailed theoretical investigations on the DFT level using various basis sets and structural models show that, with a suitable choice of methodology, the calculated parameters agree very well with the experimental ones.}},
  author       = {{Schweitzer, Annika and Gutmann, Torsten and Wächtler, Maria and Breitzke, Hergen and Buchholz, Axel and Plass, Winfried and Buntkowsky, Gerd}},
  journal      = {{Solid State Nuclear Magnetic Resonance}},
  keywords     = {{51V NMR, Model system, Ab initio calculation, Cis-dioxovanadium(V) complex, Haloperoxidase, Numerical optimization, Quadrupolar interaction}},
  number       = {{1--2}},
  pages        = {{52--67}},
  title        = {{{51V solid-state NMR investigations and DFT studies of model compounds for vanadium haloperoxidases}}},
  doi          = {{10.1016/j.ssnmr.2008.02.003}},
  volume       = {{34}},
  year         = {{2008}},
}

@article{6087,
  abstract     = {{Visual backward masking is frequently used to study the temporal dynamics of visual perception. These dynamics may include the temporal features of conscious percepts, as suggested, for instance, by the asynchronous-updating model (Neumann, 1982) and perceptual-retouch theory (Bachmann, 1994). These models predict that the perceptual latency of a visual backward mask is shorter than that of a like reference stimulus that was not preceded by a masked stimulus. The prediction has been confirmed by studies using temporal-order judgments: For certain asynchronies between mask and reference stimulus, temporal-order reversals are quite frequent (e.g., Scharlau & Neumann, 2003a). However, it may be argued that these reversals were due to a response bias in favour of the mask rather than true temporal-perceptual effects. I introduce two measures for assessing latency effects that (1) are not prone to such a response bias, (2) allow quantification of the latency gain, and (3) extend the perceptual e}},
  author       = {{Scharlau, Ingrid}},
  issn         = {{1895-1171}},
  journal      = {{Advances in Cognitive Psychology}},
  keywords     = {{temporal processes, prime mask interaction, perceptual consequences, masked information, visual backward masking, visual perception, Temporal Lobe, Visual Masking, Visual Perception, Consequence}},
  number       = {{1--2}},
  pages        = {{241--255}},
  title        = {{{Temporal processes in prime-mask interaction: Assessing perceptual consequences of masked information}}},
  volume       = {{3}},
  year         = {{2007}},
}

@inproceedings{38543,
  abstract     = {{Today a large variety of mobile interaction devices such as PDAs and mobile phones enforce the development of a wide range of user interfaces for each platform. The complexity even grows, when multiple interaction devices are used to perform the same task and when different modalities have to be supported. We introduce a new dialog model for the abstraction of concrete user interfaces with a separate advanced control layer for the integration of different modalities. In this context, we present the Dialog and Interface Specification Language (DISL), which comes with a proof-of-concept implementation.}},
  author       = {{Schäfer, Robbie and Bleul, Steffen and Müller, Wolfgang}},
  booktitle    = {{Proceedings of the 5th International Workshop on Task Models and Diagrams for User Interface Design (TAMODIA'2006)}},
  isbn         = {{978-3-540-70815-5}},
  keywords     = {{User Interface, Interaction Manager, Output Device, Multimodal Interface, Interaction Object}},
  title        = {{{Dialog Modelling for Multiple Devices and Multiple Interaction Modalities}}},
  doi          = {{10.1007/978-3-540-70816-2_4}},
  year         = {{2006}},
}

