@inproceedings{17427,
  author       = {{Jähn, Claudius and Fischer, Matthias and Gerges, Maria and Berssenbrügge, Jan}},
  booktitle    = {{12. Paderborner Workshop Augmented & Virtual Reality in der Produktentstehung}},
  pages        = {{107--120}},
  publisher    = {{Verlagsschriftenreihe des Heinz Nixdorf Instituts}},
  title        = {{{Automatische Ableitung geometrischer Eigenschaften von Bauteilen aus dem 3-D-Polygonmodell}}},
  volume       = {{342}},
  year         = {{2015}},
}

@inproceedings{23070,
  author       = {{Berssenbrügge, Jan and Wiederkehr, Olga and Jähn, Claudius and Fischer, Matthias}},
  booktitle    = {{12. Paderborner Workshop Augmented & Virtual Reality in der Produktentstehung}},
  pages        = {{65--78}},
  publisher    = {{Verlagsschriftenreihe des Heinz Nixdorf Instituts}},
  title        = {{{Anbindung des Virtuellen Prototypen an die Partialmodelle intelligenter technischer Systeme}}},
  volume       = {{342}},
  year         = {{2015}},
}

@unpublished{16449,
  abstract     = {{We consider the following variant of the two-dimensional gathering problem
for swarms of robots: a swarm of $n$ indistinguishable, point-shaped robots
is given on a two-dimensional grid. Initially, the robots form a closed chain on
the grid and must keep this connectivity during the whole process of their
gathering. Connectivity means that neighboring robots of the chain need to be
positioned at the same or neighboring points of the grid. In our model,
gathering means to keep shortening the chain until the robots are located
inside a $2\times 2$ subgrid. Our model is completely local (no global control,
no global coordinates, no compass, no global communication or vision, \ldots).
Each robot can only see its next constant number of left and right neighbors on
the chain. This fixed constant is called the \emph{viewing path length}. All
its operations and detections are restricted to this constant number of robots.
Other robots, even if located at neighboring or the same grid point, cannot be
detected. Based only on the relative positions of its detectable chain
neighbors, a robot can decide to obtain a certain state. Based on this state
and their local knowledge, the robots do local modifications to the chain by
moving to neighboring grid points without breaking the chain. These
modifications are performed without knowing whether they lead to global
progress or not. We assume the fully synchronous $\mathcal{FSYNC}$ model. For
this problem, we present a gathering algorithm which needs linear time. This
result generalizes the result from \cite{hopper}, where an open chain with
specified distinguishable (and fixed) endpoints is considered.}},
  author       = {{Abshoff, Sebastian and Cord-Landwehr, Andreas and Fischer, Matthias and Jung, Daniel and Meyer auf der Heide, Friedhelm}},
  note         = {{arXiv:1510.05454}},
  title        = {{{Gathering a Closed Chain of Robots on a Grid}}},
  year         = {{2015}},
}

@inproceedings{17439,
  abstract     = {{Viele virtuelle 3-D-Szenen im industriellen Bereich sind nicht gleichmäßig strukturiert, z.B. weil sie eine stark unterschiedliche Dichteverteilung der Polygone aufweisen. Für solch heterogene Daten existiert kein Algorithmus, der die Gesamtheit der Daten sowohl schnell als auch mit guter Qualität darstellen kann. Die Auswahl der richtigen Algorithmen für einzelne Szenenteile durch einen Experten ist zeitintensiv und in vielen Visualisierungssystemen nicht umzusetzen. Um dieses Problem zu lösen, setzt das hier vorgestellte Multi-Algorithmen-Rendering verschiedene Renderingalgorithmen gleichzeitig ein, um eine virtuelle 3-D-Szene darzustellen. Das Verfahren unterteilt die Szene dafür in einem Vorverarbeitungsschritt automatisch in geeignete Teilregionen und bestimmt deren Eigenschaften. Diese Daten werden zur Laufzeit dazu genutzt, um ständig für den aktuellen Standpunkt des Betrachters eine Abschätzung der Qualität und Laufzeit der zur Auswahl stehenden Renderingalgorithmen zu berechnen. Durch die Lösung eines Optimierungsproblems kann so bei vorgegebener Bildrate durch die passende Zuordnung der Algorithmen zu den Regionen die Bildqualität optimiert werden – bei automatischer Anpassung an die Leistungsfähigkeit der eingesetzten Hardware. In einer experimentellen Evaluierung vergleichen wir die Laufzeit und Bildqualität des Verfahrens mit denen verbreiteter Standardrenderingverfahren.}},
  author       = {{Petring, Ralf and Eikel, Benjamin and Jähn, Claudius and Fischer, Matthias and Meyer auf der Heide, Friedhelm}},
  booktitle    = {{11. Paderborner Workshop Augmented & Virtual Reality in der Produktentstehung}},
  pages        = {{49--60}},
  title        = {{{Darstellung heterogener 3-D-Szenen in Echtzeit}}},
  volume       = {{311}},
  year         = {{2013}},
}

@inproceedings{16393,
  abstract     = {{Many 3D scenes (e.g. generated from CAD data) are composed of a multitude of objects that are nested in each other. A showroom, for instance, may contain multiple cars and every car has a gearbox with many gearwheels located inside. Because the objects occlude each other, only a few are visible from the outside. We present a new technique, Spherical Visibility Sampling (SVS), for real-time 3D rendering of such -- possibly highly complex -- scenes. SVS exploits this occlusion and annotates hierarchically structured objects with directional visibility information in a preprocessing step. For different directions, the directional visibility encodes which objects of a scene's region are visible from the outside of the region's enclosing bounding sphere. Since there is no need to store a separate view space subdivision as in most techniques based on preprocessed visibility, a small memory footprint is achieved. Using the directional visibility information for an interactive walkthrough, the potentially visible objects can be retrieved very efficiently without the need for further visibility tests. Our evaluation shows that using SVS makes it possible to preprocess complex 3D scenes fast and to visualize them in real time (e.g. a Power Plant model and five animated Boeing 777 models with billions of triangles). Because SVS does not require hardware support for occlusion culling during rendering, it is even applicable for rendering large scenes on mobile devices.}},
  author       = {{Eikel, Benjamin and Jähn, Claudius and Fischer, Matthias and Meyer auf der Heide, Friedhelm}},
  booktitle    = {{Computer Graphics Forum}},
  issn         = {{0167-7055}},
  number       = {{4}},
  pages        = {{49--58}},
  title        = {{{Spherical Visibility Sampling}}},
  doi          = {{10.1111/cgf.12150}},
  volume       = {{32}},
  year         = {{2013}},
}

@inbook{16406,
  abstract     = {{In order to evaluate the efficiency of algorithms for real-time 3D rendering, different properties like rendering time, occluded triangles, or image quality, need to be investigated. Since these properties depend on the position of the camera, usually some camera path is chosen, along which the measurements are performed. As those measurements cover only a small part of the scene, this approach hardly allows drawing conclusions regarding the algorithm's properties at arbitrary positions in the scene. The presented method allows the systematic and position-independent evaluation of rendering algorithms. It uses an adaptive sampling approach to approximate the distribution of a property (like rendering time) for all positions in the scene. This approximation can be visualized to produce an intuitive impression of the algorithm's behavior or be statistically analyzed for objectively rating and comparing algorithms. We demonstrate our method by evaluating performance aspects of a known occlusion culling algorithm.
}},
  author       = {{Jähn, Claudius and Eikel, Benjamin and Fischer, Matthias and Petring, Ralf and Meyer auf der Heide, Friedhelm}},
  booktitle    = {{Advances in Visual Computing}},
  isbn         = {{9783642419133}},
  issn         = {{0302-9743}},
  title        = {{{Evaluation of Rendering Algorithms Using Position-Dependent Scene Properties}}},
  doi          = {{10.1007/978-3-642-41914-0_12}},
  year         = {{2013}},
}

@inbook{16407,
  abstract     = {{Many virtual 3D scenes, especially those that are large, are not structured evenly. For such heterogeneous data, there is no single algorithm that is able to render every scene type at each position fast and with the same high image quality. For a small set of scenes, this situation can be improved if different rendering algorithms are manually assigned to particular parts of the scene by an experienced user. We introduce the Multi-Algorithm-Rendering method. It automatically deploys different rendering algorithms simultaneously for a broad range of scene types. The method divides the scene into subregions and measures the behavior of different algorithms for each region in a preprocessing step. During runtime, this data is utilized to compute an estimate for the quality and running time of the available rendering algorithms from the observer's point of view. By solving an optimization problem, the image quality can be optimized by an assignment of algorithms to regions while keeping the frame rate almost constant.
}},
  author       = {{Petring, Ralf and Eikel, Benjamin and Jähn, Claudius and Fischer, Matthias and Meyer auf der Heide, Friedhelm}},
  booktitle    = {{Advances in Visual Computing}},
  isbn         = {{9783642419133}},
  issn         = {{0302-9743}},
  title        = {{{Real-Time 3D Rendering of Heterogeneous Scenes}}},
  doi          = {{10.1007/978-3-642-41914-0_44}},
  year         = {{2013}},
}

@inproceedings{16408,
  abstract     = {{We present a parallel rendering system for heterogeneous PC clusters to visualize massive models. One single, powerful visualization node is supported by a group of backend nodes with weak graphics performance. While the visualization node renders the visible objects, the backend nodes asynchronously perform visibility tests and supply the front end with visible scene objects. The visualization node stores only currently visible objects in its memory, while the scene is distributed among the backend nodes’ memory without redundancy. To compute the occlusion tests efficiently although each backend node stores only a fraction of the original geometry, we complete the scene by adding highly simplified versions of the objects stored on other nodes. We test our system with 15 backend nodes. It is able to render an aircraft model of ≈ 350 M polygons (≈ 8.5 GiB) at 20 to 30 fps and thus allows a walk-through in real time.
}},
  author       = {{Suess, Tim and Koch, Clemens and Jähn, Claudius and Fischer, Matthias and Meyer auf der Heide, Friedhelm}},
  booktitle    = {{Advances in Visual Computing}},
  isbn         = {{9783642331787}},
  issn         = {{0302-9743}},
  pages        = {{502--512}},
  title        = {{{Asynchronous Occlusion Culling on Heterogeneous PC Clusters for Distributed 3D Scenes}}},
  doi          = {{10.1007/978-3-642-33179-4_48}},
  volume       = {{7431}},
  year         = {{2012}},
}

@inproceedings{17420,
  abstract     = {{Occlusion culling is a common approach to accelerate real-time rendering of polygonal 3D-scenes by reducing the rendering load. Especially for large scenes, it is necessary to remove occluded objects to achieve a frame rate that provides an interactive environment. In order to benefit from the culling properly, often hierarchical data structures are used. These data structures typically create a spatial subdivision of a given scene into axis-aligned bounding boxes. These boxes can be tested quickly, but they are not very precise. When these boxes are used, the included objects are detected as visible even if other objects occlude them (false positives). To get perfect results, the model's original geometry included in the box has to be tested, but this would require too much computational power. To overcome this problem, approximations of the original objects could be used, but typical methods for mesh simplification cannot be applied because they do not create an outer hull for a given object. We present a model simplification algorithm, which generates simple outer hulls, consisting of only a few more triangles than a box, while preserving an object's shape better than a corresponding bounding box. This approach is then extended to a hierarchical data structure, the so-called hull tree, which can be generated for a given scene to improve the visibility tests. Next, we present an approximative rendering algorithm, which combines the features of the hull tree with the use of inner hulls for efficient occlusion detection and global state-sorting of the visible objects.
}},
  author       = {{Suess, Tim and Koch, Clemens and Jähn, Claudius and Fischer, Matthias}},
  booktitle    = {{Proceedings of the Graphics Interface 2011 Conference, May 25-27, St. John's, Newfoundland, Canada}},
  editor       = {{Brooks, Stephen and Irani, Pourang}},
  pages        = {{79--86}},
  publisher    = {{Canadian Human-Computer Communications Society}},
  title        = {{{Approximative occlusion culling using the hull tree}}},
  year         = {{2011}},
}

@inproceedings{17421,
  author       = {{Klaas, Alexander and Laroque, Christoph and Dangelmaier, Wilhelm and Fischer, Matthias}},
  booktitle    = {{Proceedings of the 2011 Winter Simulation Conference (WSC)}},
  isbn         = {{9781457721090}},
  title        = {{{Simulation aided, knowledge based routing for AGVs in a distribution warehouse}}},
  doi          = {{10.1109/wsc.2011.6147883}},
  year         = {{2011}},
}

@inproceedings{17450,
  author       = {{Suess, Tim and Jähn, Claudius and Fischer, Matthias and Meyer auf der Heide, Friedhelm and Koch, Clemens}},
  booktitle    = {{Augmented & Virtual Reality in der Produktentstehung}},
  pages        = {{185--197}},
  publisher    = {{Verlagsschriftenreihe des Heinz Nixdorf Instituts}},
  title        = {{{Ein paralleles Out-of-Core Renderingsystem für Standard-Rechnernetze}}},
  volume       = {{295}},
  year         = {{2011}},
}

@inproceedings{18193,
  author       = {{Renken, Hendrik and Laroque, Christoph and Fischer, Matthias}},
  booktitle    = {{Proceedings of The 25th European Simulation and Modelling Conference - ESM2011}},
  title        = {{{An Easy Extendable Modeling Framework for Discrete Event Simulation Models and their Visualization}}},
  year         = {{2011}},
}

@unpublished{18194,
  abstract     = {{We present a parallel rendering system for PC clusters to visualize large 3D scenes. One single visualization node, equipped with a high-end graphics adapter, is supported by a group of backend nodes with weak graphics performance. The objects of the scene are distributed among these backend nodes, which serve two purposes: First, they provide an out-of-core memory system for the visualization node. Second, they assist the visualization node's rendering by performing visibility calculations and only sending visible objects to the visualization node. In order to obtain fast rendering with our system, we have to distribute the objects among the backend nodes in a way that guarantees not only an even distribution of the objects, but also an even distribution of the visibility calculations and of the amount of data sent to the visualization node. We identify necessary properties of the distribution and argue that a random distribution is a good candidate. Further, in order to reduce the number of objects sent to the visualization node per frame, we employ approximate hierarchical occlusion culling in each backend node. For this, the backend nodes are equipped, in addition to the objects assigned to them, with simplified versions of the other objects of the 3D scene. The visualization node is equipped with 512 MiB video memory and supported by 15 backend nodes. This system is able to render an aircraft model of approx. 350 million polygons (approx. 8.5 GiB) at 20 to 30 fps and thus allows a walkthrough in real time.}},
  author       = {{Suess, Tim and Koch, Clemens and Jähn, Claudius and Fischer, Matthias and Meyer auf der Heide, Friedhelm}},
  title        = {{{Parallel Out-of-Core Occlusion Culling}}},
  year         = {{2011}},
}

@inproceedings{16410,
  abstract     = {{Gathering n mobile robots in one single point in the Euclidean plane is a widely studied problem from the area of robot formation problems. Classically, the robots are assumed to have no physical extent, and they are able to share a position with other robots. We drop these assumptions and investigate a similar problem for robots with (a spherical) extent: the goal is to gather the robots as close together as possible. More exactly, we want the robots to form a sphere with minimum radius around a predefined point. We propose an algorithm for this problem which synchronously moves the robots towards the center of the sphere unless they block each other. In this case, if possible, the robots spin around the center of the sphere. We analyze this algorithm experimentally in the plane. If R is the distance of the farthest robot to the center of the sphere, the simulations indicate a runtime which is linear in n and R. Additionally, we prove a theoretic upper bound for the runtime of O(nR) for a discrete version of the problem. Simulations also suggest a runtime of O(n + R) for the discrete version.}},
  author       = {{Cord-Landwehr, Andreas and Degener, Bastian and Fischer, Matthias and Hüllmann, Martina and Kempkes, Barbara and Klaas, Alexander and Kling, Peter and Kurras, Sven and Märtens, Marcus and Meyer auf der Heide, Friedhelm and Raupach, Christoph and Swierkot, Kamil and Warner, Daniel and Weddemann, Christoph and Wonisch, Daniel}},
  booktitle    = {{37th International Conference on Current Trends in Theory and Practice of Computer Science (SOFSEM 2011)}},
  isbn         = {{9783642183805}},
  issn         = {{0302-9743}},
  volume       = {{6543}},
  pages        = {{178--189}},
  publisher    = {{Springer}},
  title        = {{{Collisionless Gathering of Robots with an Extent}}},
  doi          = {{10.1007/978-3-642-18381-2_15}},
  year         = {{2011}},
}

@inbook{16409,
  abstract     = {{Given a set of n mobile robots in the d-dimensional Euclidean space, the goal is to let them converge to a single point that is not predefined. The challenge is that the robots are limited in their capabilities. Robots can, upon activation, compute the positions of all other robots using an individual affine coordinate system. The robots are indistinguishable, oblivious and may have different affine coordinate systems. A very general discrete time model assumes that robots are activated in arbitrary order. Further, the computation of a new target point may happen much earlier than the movement, so that the movement is based on outdated information about other robots' positions. Time is measured as the number of rounds, where a round ends as soon as each robot has moved at least once. In [Cohen, Peleg: Convergence properties of gravitational algorithms in asynchronous robot systems], the Center of Gravity is considered as the target function, convergence was proven, and the number of rounds needed for halving the diameter of the convex hull of the robots' positions was shown to be O(n^2) and Omega(n). We present an easy-to-check property of target functions that guarantees convergence and yields upper time bounds. This property intuitively says that when a robot computes a new target point, this point is significantly within the current axis-aligned minimal box containing all robots. This property holds, e.g., for the above-mentioned target function, and improves the above O(n^2) to an asymptotically optimal O(n) upper bound. Our technique also yields a constant time bound for a target function that requires all robots to have identical coordinate axes.
}},
  author       = {{Cord-Landwehr, Andreas and Degener, Bastian and Fischer, Matthias and Hüllmann, Martina and Kempkes, Barbara and Klaas, Alexander and Kling, Peter and Kurras, Sven and Märtens, Marcus and Meyer auf der Heide, Friedhelm and Raupach, Christoph and Swierkot, Kamil and Warner, Daniel and Weddemann, Christoph and Wonisch, Daniel}},
  booktitle    = {{Automata, Languages and Programming}},
  isbn         = {{9783642220111}},
  issn         = {{0302-9743}},
  title        = {{{A New Approach for Analyzing Convergence Algorithms for Mobile Robots}}},
  doi          = {{10.1007/978-3-642-22012-8_52}},
  year         = {{2011}},
}

@inproceedings{17422,
  abstract     = {{Commercial material flow simulation software has the ability to lay out the simulated models. Arranging equipment, such as conveyors or machines, entails the need to model and determine motion paths for moving objects like forklifts or automatically guided vehicles, so that the simulation framework is able to navigate all vehicles across those motion paths. After analyzing initial scenarios, the user often carries out layout changes in the simulation model, e.g. moving, adding or deleting equipment. However, those changes cause time-consuming additional modeling of the motion paths for the user. Our motion planning algorithm reduces this effort by automatically determining the motion paths for moving objects, depending on the actual model layout, without collisions with other objects. The algorithm works on the basis of the virtual scene's 3D data used for the simulation model's visualization. We demonstrate the technique with a multi-floor building example.}},
  author       = {{Fischer, Matthias and Renken, Hendrik and Laroque, Christoph and Schaumann, Guido and Dangelmaier, Wilhelm}},
  booktitle    = {{Proceedings of the 2010 Winter Simulation Conference}},
  isbn         = {{9781424498666}},
  title        = {{{Automated 3D-motion planning for ramps and stairs in intra-logistics material flow simulations}}},
  doi          = {{10.1109/wsc.2010.5678906}},
  year         = {{2010}},
}

