@incollection{64840,
  abstract     = {Multilingual picture books are considered to have great potential for language reflection and language learning, partly due to their multimodality, in which writing(s), language(s) and image(s) interact in the construction of meaning. What possibilities multilingual picture books offer for grammatical learning is still a research desideratum. In a multi-perspective qualitative analysis that brings together categories from picture book and typography research as well as graphemics and grammar, three variants of multilingual picture books are analysed. The results show that all of them – to varying degrees – open up possibilities for the further development of multiliteracies and offer potential that could be used in classroom practice for both implicit and explicit grammatical learning.},
  author       = {Topalovic, Elvira and Härtel, Kira},
  booktitle    = {Grammatikdidaktik und Mehrsprachigkeit: Theoretische und empirische Perspektiven},
  editor       = {Geyer, Sabrina and Cristante, Valentina},
  keywords     = {grammar, multimodality, multiliteracies, picture book, typography, Grammatik, Multimodalität, Multiliteracies, Bilderbuch, Typographie},
  pages        = {190--207},
  title        = {{Grammatik und Multimodalität im mehrsprachigen Bilderbuch. Qualitative Analyse an der Schnittstelle von Schrift, Sprache und Bild}},
  doi          = {10.46586/SLLD.462},
  year         = {2026},
}

@article{43437,
  abstract     = {In virtual reality (VR), participants may not always have hands, bodies, eyes, or even voices—using VR helmets and two controllers, participants control an avatar through virtual worlds that do not necessarily obey familiar laws of physics; moreover, the avatar’s bodily characteristics may not neatly match our bodies in the physical world. Despite these limitations and specificities, humans get things done through collaboration and the creative use of the environment. While multiuser interactive VR is attracting greater numbers of participants, there are currently few attempts to analyze the in situ interaction systematically. This paper proposes a video-analytic detail-oriented methodological framework for studying virtual reality interaction. Using multimodal conversation analysis, the paper investigates a nonverbal, embodied, two-person interaction: two players in a survival game strive to gesturally resolve a misunderstanding regarding an in-game mechanic—however, both of their microphones are turned off for the duration of play. The players’ inability to resort to complex language to resolve this issue results in a dense sequence of back-and-forth activity involving gestures, object manipulation, gaze, and body work. Most crucially, timing and modified repetitions of previously produced actions turn out to be the key to overcome both technical and communicative challenges. The paper analyzes these action sequences, demonstrates how they generate intended outcomes, and proposes a vocabulary to speak about these types of interaction more generally. The findings demonstrate the viability of multimodal analysis of VR interaction, shed light on unique challenges of analyzing interaction in virtual reality, and generate broader methodological insights about the study of nonverbal action.},
  author       = {Klowait, Nils},
  issn         = {2578-1863},
  journal      = {Human Behavior and Emerging Technologies},
  keywords     = {Human-Computer Interaction, General Social Sciences, Social Psychology, Virtual Reality, Multimodality, Nonverbal Interaction, Search Sequence, Gesture, Co-Operative Action, Goodwin, Ethnomethodology},
  pages        = {1--15},
  publisher    = {Hindawi Limited},
  title        = {On the Multimodal Resolution of a Search Sequence in Virtual Reality},
  doi          = {10.1155/2023/8417012},
  volume       = {2023},
  year         = {2023},
}

@inproceedings{48882,
  abstract     = {In multimodal multi-objective optimization (MMMOO), the focus is not solely on convergence in objective space, but rather also on explicitly ensuring diversity in decision space. We illustrate why commonly used diversity measures are not entirely appropriate for this task and propose a sophisticated basin-based evaluation (BBE) method. Also, BBE variants are developed, capturing the anytime behavior of algorithms. The set of BBE measures is tested by means of an algorithm configuration study. We show that these new measures also transfer properties of the well-established hypervolume (HV) indicator to the domain of MMMOO, thus also accounting for objective space convergence. Moreover, we advance MMMOO research by providing insights into the multimodal performance of the considered algorithms. Specifically, algorithms exploiting local structures are shown to outperform classical evolutionary multi-objective optimizers regarding the BBE variants and respective trade-off with HV.},
  author       = {Heins, Jonathan and Rook, Jeroen and Schäpermeier, Lennart and Kerschke, Pascal and Bossek, Jakob and Trautmann, Heike},
  booktitle    = {Parallel Problem Solving from Nature ({PPSN XVII})},
  editor       = {Rudolph, Günter and Kononova, Anna V. and Aguirre, Hernán and Kerschke, Pascal and Ochoa, Gabriela and Tusar, Tea},
  isbn         = {978-3-031-14714-2},
  keywords     = {Anytime behavior, Benchmarking, Continuous optimization, Multi-objective optimization, Multimodality, Performance metric},
  pages        = {192--206},
  publisher    = {Springer International Publishing},
  title        = {{BBE}: Basin-Based Evaluation of Multimodal Multi-objective Optimization Problems},
  doi          = {10.1007/978-3-031-14714-2_14},
  year         = {2022},
}

@article{34564,
  abstract     = {To provide user interfaces for a rich set of devices and interaction modalities, we follow a model-based development methodology. We devised an architecture which deploys user interfaces specified as dialogue models with abstract interaction objects and allows context-based adaptations by means of an external transcoding process. For the validation of the applicability of this methodology for developing usable multimodal multi-device systems, we present two case studies based on proof-of-concept implementations and assessed them with a large set of established design principles and different types of modality cooperation.},
  author       = {Schäfer, Robbie and Müller, Wolfgang},
  journal      = {Journal on Multimodal User Interfaces},
  keywords     = {Interaction architecture, Abstract interaction objects, Dialogue model, Transformations, Multimodality, Multi-device, Design principles},
  number       = {1},
  pages        = {25--41},
  publisher    = {Springer-Verlag},
  title        = {Assessment of a Multimodal Interaction and Rendering System against Established Design Principles},
  doi          = {10.1007/s12193-008-0003-3},
  volume       = {2},
  year         = {2008},
}

