@inproceedings{18372,
  abstract     = {{Simulation und Visualisierung sind anerkannte Mittel zum Verstehen und Analysieren von Fertigungsprozessen. In Visualisierungen von Fertigungsprozessen können Betrachter frei und ungeleitet umherwandern. Erkenntnisse werden so aber eher zufällig erworben. Dieser Artikel skizziert ein System und Methoden, die den Betrachter unterstützen, auf auffällige/signifikante Prozesse/Punkte in Materialflusssimulationen aufmerksam zu werden und diese zu entschärfen.
Es wird der Entwurf eines Werkzeugs beschrieben, das dem Betrachter einer Simulation die Möglichkeit bietet, signifikante Produktionsprozesse interaktiv zu verbessern. Der Benutzer wird sich in einer virtuellen 3D-Umgebung (Walkthrough-System) bewegen können und automatisch ermittelte Indizien für signifikante Abläufe erhalten. Zugleich soll die Simulation signifikante Objekte genauer simulieren. Bekundet der Benutzer Interesse an einem signifikanten Prozess, wird er automatisch zu dem jeweiligen Ort geführt werden und dort durch Eingriffe in die Simulation die kritische Situation experimentell untersuchen können. Da der kritische Moment in der Vergangenheit liegt und somit vom Betrachter schon verpasst ist, wird es dem Betrachter möglich sein, die Simulation auf einen Zeitpunkt vor dem Eintreten zurück zu setzen.
Die virtuelle Szene (3D-Grafik-Modelle) einer typischen dynamischen Simulationsumgebung ist in der Regel zu komplex, um sie in Echtzeit in einem Walkthrough-System zu visualisieren und darzustellen. Typischerweise werden Approximationsverfahren eingesetzt, um die Komplexität zu reduzieren und ein flüssiges Navigieren des Betrachters zu erlauben. Durch spezifische Simulations-Anforderungen ist bekannt, an welchen Objekten des Simulationsmodells Probleme auftreten; sie sind für den Betrachter wichtig. Die zugehörigen virtuellen 3D-Repräsentanten können von den Approximationsalgorithmen mit einer besonders hohen Darstellungsqualität dargestellt werden und die übrigen Teile der virtuellen Szene entsprechend vernachlässigt werden. Solche Approximationsalgorithmen und Datenstrukturen nutzen die spezifischen Eigenschaften virtueller Simulationsumgebungen aus, um eine hohe Darstellungsqualität und Darstellungsperformance zu erreichen.
}},
  author       = {{Dangelmaier, Wilhelm and Franke, Werner and Mueck, Bengt and Fischer, Matthias}},
  booktitle    = {{2. Paderborner Workshop Augmented \& Virtual Reality in der Produktentstehung}},
  pages        = {{141--151}},
  publisher    = {{Verlagsschriftenreihe des Heinz Nixdorf Instituts, Paderborn}},
  title        = {{{Komponenten zur aktiven Unterstützung der Analyse von Materialflusssimulationen in virtuellen Umgebungen}}},
  volume       = {{123}},
  year         = {{2003}},
}

@inproceedings{18374,
  abstract     = {{In der heutigen Zeit operieren Unternehmen zunehmend in einem schwierigen Umfeld: Die Innovationsdynamik nimmt zu und die Produktlebenszyklen werden kürzer. Daraus resultieren hohe Anforderungen an die Planung von Fertigungssystemen. Um diesen Prozess zu unterstützen, sollen die Technologien Augmented Reality und Virtual Reality in einem integrierten System genutzt werden. Dieses System unterstützt den Anwender bei der Modellbildung, der Validierung des Simulationsmodells sowie der folgenden Optimierung des Fertigungssystems. Durch die Entwicklung geeigneter Kopplungs- bzw. Integrationsmechanismen wird eine durchgängige Nutzung der Technologien AR, VR und Simulation realisiert. Die Visualisierung der anfallenden 3D-Daten innerhalb der VR- und AR-Umgebungen erfolgt mittels einer 3D-Renderinglibrary, die es durch den Einsatz von neu entwickelten Verfahren ermöglicht, die verwendeten 3D-Modelle weitgehend automatisiert aus unternehmensinternen 3D-CAD-Modellen zu generieren.}},
  author       = {{Fischer, Matthias and Grafe, Michael and Matysczok, Carsten and Schoo, Michael and Mueck, Bengt}},
  booktitle    = {{2. Paderborner Workshop Augmented \& Virtual Reality in der Produktentstehung}},
  pages        = {{153--166}},
  publisher    = {{Verlagsschriftenreihe des Heinz Nixdorf Instituts, Paderborn}},
  title        = {{{Planung von komplexen Fertigungssystemen durch Einsatz einer VR/AR-unterstützten Simulation}}},
  volume       = {{123}},
  year         = {{2003}},
}

@inproceedings{18369,
  abstract     = {{Visualising is a method used to help experiencing and understanding causal cohesions in simulation processes. For this purpose, tools for visualising are already implemented in prevalent simulation systems. The user creates his simulation model and generates a 3-dimensional (2,5-dimensional) visualising by means of the simulation system. This helps examining the process which makes it easier for the viewer to understand it. Simulation tools usually only provide the opportunity for a unidirectional visualising. In a 3-dimensional surrounding the viewer can not implement an interaction with the simulation while the system is running. Though an interaction during the simulation run enables the user to gain a better understanding of causal cohesions. Solutions via HLA are sophisticated and therefore rather suited for extensive projects.
We present a distributed system consisting of a commercial manufacturing simulation tool, a coupling module and a walkthrough system. The distributed system in conjunctions with the coupling module guarantees generality and a wide field of applications of the walkthrough system. Further it guarantees flexibility and selection of the specialized graphics hardware for the walkthrough system. A further contribution of this paper is the solution of the time synchronisation problem caused by simulation tool and walkthrough system.
}},
  author       = {{Mueck, Bengt and Dangelmaier, Wilhelm and Fischer, Matthias and Klemisch, Wolfram}},
  booktitle    = {{Simulation und Visualisierung}},
  pages        = {{71--84}},
  publisher    = {{SCS European Publishing House}},
  title        = {{{Bi-directional Coupling of Simulation Tools with a Walkthrough-System}}},
  year         = {{2002}},
}

@inproceedings{16490,
  abstract     = {{We present a new data structure for rendering highly complex virtual environments of arbitrary topology. The special feature of our approach is that it allows an interactive navigation in very large scenes (30 GB/400 million polygons in our benchmark scenes) that cannot be stored in main memory, but only on a local or remote hard disk. Furthermore, it allows interactive rendering of substantially more complex scenes by instantiating objects.

For the computation of an approximate image of the scene, a sampling technique is used. In the preprocessing, a so-called sample tree is built whose nodes contain randomly selected polygons from the scene. This tree only uses space that is linear in the number of polygons. In order to produce an image of the scene, the tree is traversed and polygons stored in the visited nodes are rendered. During the interactive walkthrough, parts of the sample tree are loaded from local or remote hard disk.

We implemented our algorithm in a prototypical walkthrough system. Analysis and experiments show that the quality of our images is comparable to images computed by the conventional z-buffer algorithm regardless of the scene topology.}},
  author       = {{Klein, Jan and Krokowski, Jens and Fischer, Matthias and Wand, Michael and Wanka, Rolf and Meyer auf der Heide, Friedhelm}},
  booktitle    = {{Proceedings of the ACM symposium on Virtual reality software and technology - VRST '02}},
  isbn         = {{1581135300}},
  title        = {{{The randomized sample tree: a data structure for interactive walkthroughs in externally stored virtual environments}}},
  doi          = {{10.1145/585740.585764}},
  year         = {{2002}},
}

@inproceedings{18370,
  abstract     = {{We present a new approximate occlusion-culling algorithm that in contrast to other algorithms, manages the objects of the scene in a 3D-sectorgraph. For generating a frame, as far as possible only the visible objects are rendered that can be found quickly by an edge of the graph. The algorithm allows a real-time navigation with over 20 frames per second in complex scenes consisting of over 10 millions of polygons. Moreover, approximation errors are very low.}},
  author       = {{Klein, Jan and Fischer, Matthias}},
  booktitle    = {{Proc. of 3. GI-Informatiktage 2001}},
  pages        = {{275--278}},
  title        = {{{Occlusion Culling for Virtual Environments based on the 3D-Sectorgraph}}},
  year         = {{2001}},
}

@inproceedings{16492,
  abstract     = {{We present a new output-sensitive rendering algorithm, the randomized z-buffer algorithm. It renders an image of an arbitrary three-dimensional scene consisting of triangular primitives by reconstruction from a dynamically chosen set of random surface sample points. This approach is independent of mesh connectivity and topology. The resulting rendering time grows only logarithmically with the numbers of triangles in the scene. We were able to render walkthroughs of scenes of up to 10^14 triangles at interactive frame rates. Automatic identification of low detail scene components ensures that the rendering speed of the randomized z-buffer cannot drop below that of conventional z-buffer rendering. Experimental and analytical evidence is given that the image quality is comparable to that of common approaches like z-buffer rendering. The precomputed data structures employed by the randomized z-buffer allow for interactive dynamic updates of the scene. Their memory requirements grow only linearly with the number of triangles and allow for a scene graph based instantiation scheme to further reduce memory consumption.}},
  author       = {{Wand, Michael and Fischer, Matthias and Peter, Ingmar and Meyer auf der Heide, Friedhelm and Straßer, Wolfgang}},
  booktitle    = {{Proceedings of the 28th annual conference on Computer graphics and interactive techniques - SIGGRAPH '01}},
  isbn         = {{158113374X}},
  title        = {{{The randomized z-buffer algorithm}}},
  doi          = {{10.1145/383259.383299}},
  year         = {{2001}},
}

@techreport{17865,
  abstract     = {{We present a new output-sensitive rendering algorithm, the randomized z-buffer algorithm. It renders an image of a three dimensional scene of triangular primitives by reconstruction from a random sample of surface points which are chosen with a probability proportional to the projected area of the objects. The approach is independent of mesh connectivity and topology. It leads to a rendering time that grows only logarithmically with the numbers of triangles in the scene and to linear memory consumption, thus allowing walkthroughs of scenes of extreme complexity. We consider different methods for image reconstruction which aim at correctness, rendering speed and image quality and we develop an efficient data structure for sample extraction in output-sensitive time which allows for efficient dynamic updates of the scene. Experiments confirm that scenes consisting of some hundred billion triangles can be rendered within seconds with an image quality comparable to a conventional z-buffer rendering; in special cases, realtime performance can be achieved.}},
  author       = {{Wand, Michael and Fischer, Matthias and Meyer auf der Heide, Friedhelm}},
  title        = {{{Randomized Point Sampling for Output-Sensitive Rendering of Complex Dynamic Scenes}}},
  year         = {{2000}},
}

@inproceedings{17864,
  abstract     = {{A geometric spanner with vertex set P in Rd is a sparse approximation of the complete Euclidean graph determined by P. We introduce the notion of partitioned neighborhood graphs (PNGs), unifying and generalizing most constructions of spanners treated in literature. Two important parameters characterizing their properties are the outdegree k in N and the stretch factor f>1 describing the quality of approximation. PNGs have been throughly investigated with respect to small values of f. We present in this work results about small values of k. The aim of minimizing k rather than f arises from two observations:

* k determines the amount of space required for storing PNGs.

* Many algorithms employing a (previously constructed) spanner have running times depending on its outdegree.

Our results include, for fixed dimensions d as well as asymptotically, upper and lower bounds on this optimal value of k. The upper bounds are shown constructively and yield efficient algorithms for actually computing the corresponding PNGs even in degenerate cases.
}},
  author       = {{Fischer, Matthias and Lukovszki, Tamás and Ziegler, Martin}},
  booktitle    = {{Proceedings of the 11th Canadian Conference on Computational Geometry}},
  title        = {{{Partitioned neighborhood spanners of minimal outdegree}}},
  year         = {{1999}},
}

@inbook{17412,
  abstract     = {{We study algorithmic aspects in the management of geometric scenes in interactive walkthrough animations. We consider arbitrarily large scenes consisting of unit size balls. For a smooth navigation in the scene we have to fulfill hard real time requirements. Therefore, we need algorithms whose running time is independent of the total number of objects in the scene and that use as small space as possible. In this work we focus on one of the basic operations in our walkthrough system: reporting the objects around the visitor within a certain distance. Previously a randomized data structure was presented that supports reporting the balls around the visitor in an output sensitive time and allows insertion and deletion of objects nearly as fast as searching. These results were achieved by exploiting the fact that the visitor moves ''slowly'' through the scene. A serious disadvantage of the aforementioned data structure is a big space overhead and the use of randomization. Our first result is a construction of weak spanners that leads to an improvement of the space requirement of the previously known data structures. Then we develop a deterministic data structure for the searching problem in which insertion of objects are allowed. Our incremental data structure supports O(1+k) reporting time, where k is a certain quantity close to the number of reported objects. The insertion time is similar to the reporting time and the space is linear to the total number of objects.
}},
  author       = {{Fischer, Matthias and Lukovszki, Tamás and Ziegler, Martin}},
  booktitle    = {{Algorithms — ESA '98}},
  isbn         = {{9783540648482}},
  issn         = {{0302-9743}},
  title        = {{{Geometric Searching in Walkthrough Animations with Weak Spanners in Real Time}}},
  doi          = {{10.1007/3-540-68530-8_14}},
  year         = {{1998}},
}

@inproceedings{17863,
  abstract     = {{New dynamic search data structures developed recently guarantee constant execution time per search and update, i.e., they fulfil the real-time requirements necessary for interactive walkthrough in large geometric scenes. Yet, superiority or even applicability of these new methods in practice was still an open question.

Their prototypical implementation presented in this work uses common libraries on standard stations and thus represents a first strut to bridge this gap. Indeed our experimental results give an indication on the actual performance of these theoretical ideas on real machines and possible bottlenecks in future developments. By special algorithmic enhancements, we can even avoid the otherwise essential preprocessing step.
}},
  author       = {{Fischer, Matthias and Lukovszki, Tamás and Ziegler, Martin}},
  booktitle    = {{Algorithm Engineering, 2nd International Workshop, {WAE '98}}},
  pages        = {{133--142}},
  publisher    = {{Max-Planck-Institut für Informatik}},
  title        = {{{A Network Based Approach for Realtime Walkthrough of Massive Models}}},
  year         = {{1998}},
}

@techreport{18145,
  abstract     = {{Preis für den Beitrag "Multimediale Entdeckungsreisen unserer Welt mit dem Internet"}},
  author       = {{Ziegler, Martin and Fischer, Matthias and Lukovszki, Tamás}},
  title        = {{{Multimediale Entdeckungsreisen unserer Welt mit dem Internet}}},
  year         = {{1998}},
}

@inproceedings{16568,
  abstract     = {{We present a data structure problem which describes the requirements of a simple variant of fully dynamic walk-through animation: We assume the scene to consist of unit size balls in R2 or higher dimensions. The scene may be arbitrarily large and has to be stored in secondary memory (discs) with relatively slow access. We allow a visitor to walk in the scene, and a modeler to update the scene by insertions and deletions of balls. We focus on the realtime requirement of animation systems: For some t (specified by the computation power of (the rendering hardware of) the graphic workstation) the data structure has to guarantee that the balls within distance t of the current visitor's position are presented to the rendering hardware, 20 times per second. Insertions and deletions should also be available to the visitor with small delay, independent of the size of the scene. We present a data structure that fulfills the above task in realtime. Its runtime is output-sensitive, i.e. linear in a quantity close to the output size of the query. We further present (preliminary) experimental results indicating that our structure is efficient in practice.
}},
  author       = {{Fischer, Matthias and Meyer auf der Heide, Friedhelm and Strothmann, Willy-Bernhard}},
  booktitle    = {{5th Annual European Symposium on Algorithms (ESA '97)}},
  isbn         = {{9783540633976}},
  issn         = {{0302-9743}},
  pages        = {{157--170}},
  publisher    = {{Springer}},
  title        = {{{Dynamic data structures for realtime management of large geometric scenes}}},
  doi          = {{10.1007/3-540-63397-9_13}},
  volume       = {{1284}},
  year         = {{1997}},
}

@inproceedings{17483,
  abstract     = {{In this paper we develop a model for communication time on parallel computers consisting of processors and a service network, i.e., a network performing services like broadcast, synchronization, and global variables. The implementation of the service network is done on a free configurable Transputer network.
Our cost model describes the communication time of accesses to global variables and consists of a multi-linear function. The cost model includes the parameters packet size, send hot spot, and the number of processors accessing global variables. These parameters influence the communication time in a high degree and capture important parameters like contention.
We implement a Bitonic Sort and a Connected Components algorithm (among others) and we show that our model is able to predict the communication time within a 10% error if indirect service networks are used. The applications show that it is easy for a programmer to determine the parameter values for our model and that our new cost model precisely predicts the communication time of parallel algorithms.
Furthermore, we minimize the communication time of accesses to global variables by finding a balance between the number of messages in the network and their size. Our model predicts the optimal values for these parameters which we validate by experiments. A modified implementation of our routing which determines on-line the optimal parameter values for an access to a global variable achieves good speed ups.}},
  author       = {{Fischer, Matthias and Rethmann, Jochen and Wachsmann, Alf}},
  booktitle    = {{3rd Workshop on Abstract Machine Models for Parallel and Distributed Computing (AMW '96)}},
  isbn         = {{905199267X}},
  pages        = {{13--27}},
  publisher    = {{IOS Press}},
  title        = {{{A Realistic Cost Model for the Communication Time in Parallel Programs}}},
  year         = {{1996}},
}

@techreport{18352,
  abstract     = {{In this report, we develop a cost model for the communication time on parallel computers consisting of processors and a service network, i.e., a network performing services like broadcast, synchronization, and global variables. Because we do not have a parallel computer at our disposal that is equipped with a service network, we emulate the service network on a reconfigurable Transputer network.
Our cost model describes the communication time of accesses to global variables and consists of a multi-linear function. The cost model includes the parameters packet size, send hot spot (the number of messages sent out by one processor), and number of processors accessing global variables. We show that these parameters influence the communication time in a high degree and capture important parameters like network contention.
We implement a Bitonic Sort, Sample Sort, Matrix Multiplication, and Connected Components algorithm, and we show that our model is able to predict the communication time within a 10% error if indirect service networks are used. The applications show that it is easy for a programmer to determine the parameter values for our model and that our new cost model precisely predicts the communication time of parallel algorithms.
We explore the interaction of hot spots and asynchrony and show that the influence of hot spots to the communication time is not as high as one would expect from theoretical considerations in a synchronous model. Therefore, we do not apprehend the hot spot in our cost model.
Furthermore, we minimize the communication time of accesses to global variables by finding a balance between the number of messages in the network and their size. Our model predicts the optimal values for these parameters which we validate by experiments. A modified implementation of our routing which determines on-line the optimal parameter values for an access to a global variable achieves good speed ups.
}},
  author       = {{Fischer, Matthias and Rethmann, Jochen and Wachsmann, Alf}},
  title        = {{{A Realistic Cost Model for the Communication Time in Parallel Programs on Parallel Computers Using a Service Hardware}}},
  year         = {{1996}},
}

