@article{43437,
  author       = {Klowait, Nils},
  title        = {On the Multimodal Resolution of a Search Sequence in {Virtual Reality}},
  journal      = {Human Behavior and Emerging Technologies},
  publisher    = {Hindawi Limited},
  volume       = {2023},
  pages        = {1--15},
  year         = {2023},
  issn         = {2578-1863},
  doi          = {10.1155/2023/8417012},
  keywords     = {Human-Computer Interaction, General Social Sciences, Social Psychology, Virtual Reality, Multimodality, Nonverbal Interaction, Search Sequence, Gesture, Co-Operative Action, Goodwin, Ethnomethodology},
  abstract     = {In virtual reality (VR), participants may not always have hands, bodies, eyes, or even voices—using VR helmets and two controllers, participants control an avatar through virtual worlds that do not necessarily obey familiar laws of physics; moreover, the avatar’s bodily characteristics may not neatly match our bodies in the physical world. Despite these limitations and specificities, humans get things done through collaboration and the creative use of the environment. While multiuser interactive VR is attracting greater numbers of participants, there are currently few attempts to analyze the in situ interaction systematically. This paper proposes a video-analytic detail-oriented methodological framework for studying virtual reality interaction. Using multimodal conversation analysis, the paper investigates a nonverbal, embodied, two-person interaction: two players in a survival game strive to gesturally resolve a misunderstanding regarding an in-game mechanic—however, both of their microphones are turned off for the duration of play. The players’ inability to resort to complex language to resolve this issue results in a dense sequence of back-and-forth activity involving gestures, object manipulation, gaze, and body work. Most crucially, timing and modified repetitions of previously produced actions turn out to be the key to overcome both technical and communicative challenges. The paper analyzes these action sequences, demonstrates how they generate intended outcomes, and proposes a vocabulary to speak about these types of interaction more generally. The findings demonstrate the viability of multimodal analysis of VR interaction, shed light on unique challenges of analyzing interaction in virtual reality, and generate broader methodological insights about the study of nonverbal action.},
}

@inproceedings{29840,
  author       = {Yigitbas, Enes and Gottschalk, Sebastian and Nowosad, Alexander and Engels, Gregor},
  title        = {Development and Evaluation of a Collaborative Stock Trading Environment in {Virtual Reality}},
  booktitle    = {Proceedings of the 17th International Conference on Wirtschaftsinformatik},
  publisher    = {AIS},
  location     = {Nuremberg},
  year         = {2022},
  keywords     = {virtual reality, stock trading, collaboration, usability},
  abstract     = {Due to the proliferation of Virtual Reality (VR) technology, VR is finding new applications in various domains, such as stock trading. Here, traders invest in stocks intending to increase their profit. For this purpose, in conventional stock trading, traders usually make use of 2D applications on desktop or laptop devices. This leads to many drawbacks such as poor visibility due to limited 2D representation, complex interaction due to indirect interaction via mouse and keyboard, or restricted support for collaboration between traders. To overcome these issues, we have developed a novel collaborative, virtual environment for stock trading, which enables stock traders to view financial information and trade stocks with other collaborators. The main results of a usability study indicate that the VR environment, compared to conventional stock trading, shows no significant advantages concerning efficiency and effectiveness, however, we could observe an increased user satisfaction and better collaboration.},
}

@inproceedings{34127,
  author       = {Alptekin, Mesut and Temmen, Katrin},
  title        = {{Möglichkeiten} und {Grenzen} von {Virtual}- und {Augmented Reality} im {Laborpraktikum}},
  booktitle    = {Digitalisierung in der Techniklehre -- ihr Beitrag zum Profil technischer Bildung},
  editor       = {Kammasch, Gudrun and Klaffke, Henning and Knutzen, Sönke},
  volume       = {12},
  pages        = {91--98},
  location     = {Technische Universität Ilmenau},
  isbn         = {978-3-9818728-1-1},
  year         = {2017},
  internal-note = {Source data had the editors in the publisher field as "Gudrun Kammasch, Henning Klaf e, Sönke Knutzen (Hrsg.)"; second editor reconstructed as "Klaffke" -- verify against the printed volume.},
  keywords     = {Virtual Reality, Augmented Reality, Laborpraktika, Ingenieurdidaktik, Labordidaktik},
  abstract     = {Obwohl die Idee von Augmented Reality (AR) und Virtual Reality (VR) so alt ist wie die Verbreitung erster Spielekonsolen und Computer, hat das Thema erst durch den technologischen Fortschritt und dem damit verbundenen Preisverfall an Bedeutung gewonnen [1]. So lassen sich mittlerweile bereits anspruchsvolle AR- und VR-Anwendungen auf handelsüblichen Smartphones und Tablets betreiben [1][2]. Daraus erschließen sich neue Möglichkeiten in der Lehre, z.B. die Visualisierung räumlicher Darstellungen, die Förderung der räumlichen Vorstellungskraft der Studierenden, sowie die Vermittlung von abstrakten und damit schwer verständlichen Konzepten in den Naturwissenschaften [3].
Zahlreiche Studien zeigen bereits, dass, wenn AR effektiv in der Lehre eingesetzt wird, nicht nur das Lerninteresse, sondern auch die Konzentration der Lernenden gesteigert werden kann [3][4]. Voraussetzung hierfür ist jedoch, dass zunächst lernförderliche Merkmale identifiziert und bzgl. ihrer Wirksamkeit in einer VR- oder AR-Umgebung untersucht werden [5].
Zu den Pflichtveranstaltungen eines Elektrotechnik-Studiums an der Universität Paderborn gehören drei fächerübergreifende Laborpraktika, die der Vertiefung theoretischer Vorlesungsinhalte dienen. Ein großes Problem stellt dabei die Bedienung der elektrotechnischen Laborgeräte dar. Sowohl Studierende als auch die betreuenden Laboringenieure kritisieren, dass ein erster Kontakt mit den Geräten erst innerhalb des Praktikums stattfindet. Um dieser Problematik entgegen zu wirken, soll eine Lernumgebung entwickelt werden, in der Studierende den Umgang mit dem Laborequipment sowohl zeit- als auch ortsunabhängig erlernen können.
In diesem Beitrag wird daher untersucht, welche Potentiale die VR- und die AR-Technologie auf mobilen Endgeräten bieten, um praktische Fertigkeiten im Umgang mit elektrotechnischer Laborausstattung als Vorbereitung auf das praktische Arbeiten im Labor zu erwerben und zu vertiefen. Es wird gezeigt, wo die besonderen Unterschiede und Vorzüge beider Technologien sind und insbesondere wie die (Inter-)Aktion des Lernenden innerhalb einer VR- oder AR-Umgebung aussehen kann.
In einer anschließenden Arbeit soll ausgehend von den hier erarbeiteten Potentialen und den bekannten lerntheoretischen und kognitionspsychologischen Theorien des Wissenserwerbs ein Konzept zur Gestaltung einer VR- und einer AR-Umgebung im Rahmen eines Laborpraktikums entwickelt werden. Dabei werden motivationspsychologische Aspekte, z.B. etablierte Gamification-Konzepte analysiert, die in solch einer Umgebung genutzt werden können, um u.a. die Lernmotivation weiter zu fördern.},
}

@inproceedings{39493,
  author       = {Geiger, Christian and Müller, Wolfgang and Rosenbach, W.},
  title        = {{SAM} -- An Animated {3D} Programming Language},
  booktitle    = {Proceedings of the {IEEE} Symposium on Visual Languages},
  location     = {Halifax, Canada},
  isbn         = {0-8186-8712-6},
  year         = {1998},
  doi          = {10.1109/VL.1998.706167},
  keywords     = {Animation, Computer languages, Solids, Concrete, Application software, Virtual reality, Programming profession, Switches, Visualization, Debugging},
  abstract     = {This article presents the animated visual 3D programming language SAM (Solid Agents in Motion) for parallel systems specification and animation. A SAM program is a set of interacting agents synchronously exchanging messages. The agent's behaviour is specified by means of production rules with a condition and a sequence of actions each. Actions are linearly ordered and execute when matching a rule. In SAM, main syntactic objects like agents, rules, and messages are 3D. These objects can have an abstract and a concrete, solid 3D presentation. While the abstract representation is for programming and debugging, the concrete representation is for animated 3D end-user presentations. After outlining the concepts of SAM this article gives two programming examples of 3D micro worlds and an overview of the programming environment.},
}

@inproceedings{39505,
  author       = {Dücker, M. and Geiger, Christian and Hunstock, R. and Lehrenfeld, Georg and Müller, Wolfgang},
  title        = {Visual-Textual Prototyping of {4D} Scenes},
  booktitle    = {Proceedings of the 1997 {IEEE} Symposium on Visual Languages},
  isbn         = {0-8186-8144-6},
  year         = {1997},
  doi          = {10.1109/VL.1997.626601},
  keywords     = {Prototypes, Layout, Animation, Command languages, Application software, Libraries, Virtual reality, Computer graphics, Hardware, Context modeling},
  abstract     = {3D-graphics are becoming popular in a steadily increasing number of areas such as entertainment, scientific visualization, simulation, and virtual reality. Despite this rapid growth the generation of animated 3D scenes is by no means trivial. Since animated 3D objects evolve over time the authors denote these objects as 4D. The article presents a novel approach to the rapid prototyping of 4D models. They introduce the AAL (Animated Agent Layer) system. AAL is an interpreter-based approach covering a textual (AAL-PR) as well as a visual command language (AAL-VL) for the specification of the dynamics in 4D scenes. AAL provides support for different levels of abstraction: primitives, structured objects, animated objects, and animated (autonomous) agents.},
}

