@inproceedings{18279, abstract = {{For $c \in \mathbb{R}$, a $c$-spanner is a subgraph of a complete Euclidean graph satisfying that between any two vertices there exists a path of weighted length at most $c$ times their geometric distance. Based on this property to approximate a complete weighted graph, sparse spanners have found many applications, e.g., in FPTAS, geometric searching, and radio networks. For geometric searching, it turned out to suffice whether the radius rather than the length of some path between any two vertices is bounded relatively to their geometric distance; this is the defining property of weak spanners. Finally regarding radio network applications, a power spanner accounts for the total energy afforded for a wireless transmission with the requirement that the sum of the squares of the lengths of some path between any two planar vertices must be bounded relatively to the square of their geometric distance (or higher powers up to 6 or even 8).

While it is known that any $c$-spanner is also both a weak $C_1$-spanner and a $C_2$-power spanner (for appropriate $C_1,C_2$ depending only on $c$ but not on the graph under consideration), we show that the converse fails: There exists a family of $c_1$-power spanners that are no weak $C$-spanners and also a family of weak $c_2$-spanners that are no $C$-spanners for any fixed $C$ (and thus no uniform spanners, either). However the deepest result of the present work reveals that, surprisingly, any weak spanner is also a uniform power spanner. We further generalize the latter notion by considering $(c,\delta)$-power spanners where the sum of the $\delta$-th powers of the lengths has to be bounded; so $(\cdot,2)$-power spanners coincide with the usual power spanners and $(\cdot,1)$-power spanners are classical spanners. Interestingly, these $(\cdot,\delta)$-power spanners form a strict hierarchy where the above results still hold for any $\delta \geq 2$; some even hold for $\delta > 1$ while counterexamples reveal others to fail for $\delta < 2$. In fact we show that in general every self-similar curve of fractal dimension $d > \delta$ is no $(C,\delta)$-power spanner for any fixed $C$. }}, author = {{Schindelhauer, Christian and Volbert, Klaus and Ziegler, Martin}}, booktitle = {{Proc. of 15th Annual International Symposium on Algorithms and Computation (ISAAC'04)}}, isbn = {{9783540241317}}, issn = {{0302-9743}}, pages = {{805--821}}, publisher = {{Springer}}, title = {{{Spanners, Weak Spanners, and Power Spanners for Wireless Networks}}}, doi = {{10.1007/978-3-540-30551-4_69}}, volume = {{3341}}, year = {{2004}}, } @inproceedings{18364, abstract = {{The visualisation of manufacturing-processes assists the user in understanding and analysis. Typically he can move freely and unguided in a virtual environment which visualizes the entire process. Thus knowledge and conclusions are to some extent acquired on a random base. 
This article describes the development of a tool, which enables the user to interactively improve significant production processes in the simulation. He moves in a virtual 3D-environment (walkthrough system) and is able to acquire automatically calculated indications for significant processes. At the same time the simulation considers significant objects in a more detailed way. If the viewer is interested in a significant process, he is automatically guided to the relevant location where he can examine the critical situation by modification of the simulation model.}}, author = {{Mueck, Bengt and Dangelmaier, Wilhelm and Laroque, Christoph and Fischer, Matthias and Kortenjan, Michael}}, booktitle = {{Simulation and Visualisation 2004}}, pages = {{73--83}}, publisher = {{SCS European Publishing House}}, title = {{{Guidance of Users in Interactive 3D-Visualisations of Material Flow Simulations}}}, year = {{2004}}, } @article{18447, author = {{Oesterdiekhoff, Brigitte}}, journal = {{Informatik Spektrum}}, number = {{5}}, pages = {{448--452}}, title = {{{Transcoding von Webinhalten}}}, volume = {{27}}, year = {{2004}}, } @inproceedings{18448, author = {{Oesterdiekhoff, Brigitte}}, booktitle = {{Proceedings of IFIP Working Conference on Distributed and Parallel Embedded Systems (DIPES'04)}}, title = {{{Internet Premium Services for Flexible Format Distributed Devices}}}, year = {{2004}}, } @inproceedings{16474, abstract = {{Given n distinct points p1, p2, ... , pn in the plane, the map labeling problem with four squares is to place n axis-parallel equi-sized squares Q1, ... ,Qn of maximum possible size such that pi is a corner of Qi and no two squares overlap. This problem is NP-hard and no algorithm with approximation ratio better than 1/2 exists unless P = NP [10]. In this paper, we consider a scenario where we want to visualize the information gathered by smart dust, i.e. 
by a large set of simple devices, each consisting of a sensor and a sender that can gather sensor data and send it to a central station. Our task is to label (the positions of) these sensors in a way described by the labeling problem above. Since these devices are not positioned accurately (for example, they might be dropped from an airplane), this gives rise to consider the map labeling problem under the assumption, that the positions of the points are not fixed precisely, but perturbed by random noise. In other words, we consider the smoothed complexity of the map labeling problem. We present an algorithm that, under such an assumption and Gaussian random noise with sufficiently large variance, has linear smoothed complexity.}}, author = {{Bansal, Vikas and Meyer auf der Heide, Friedhelm and Sohler, Christian}}, booktitle = {{12th Annual European Symposium on Algorithms (ESA 2004)}}, isbn = {{9783540230250}}, issn = {{0302-9743}}, title = {{{Labeling Smart Dust}}}, doi = {{10.1007/978-3-540-30140-0_9}}, volume = {{3221}}, year = {{2004}}, } @inproceedings{16475, author = {{Bienkowski, Marcin and Korzeniowski, Miroslaw and Meyer auf der Heide, Friedhelm}}, booktitle = {{Proceedings of the sixteenth annual ACM symposium on Parallelism in algorithms and architectures - SPAA '04}}, isbn = {{1581138407}}, title = {{{Fighting against two adversaries}}}, doi = {{10.1145/1007912.1007923}}, year = {{2004}}, } @article{16477, author = {{Meyer auf der Heide, Friedhelm and Schindelhauer, Christian and Volbert, Klaus and Grünewald, Matthias}}, issn = {{1432-4350}}, journal = {{Theory of Computing Systems}}, pages = {{343--370}}, title = {{{Congestion, Dilation, and Energy in Radio Networks}}}, doi = {{10.1007/s00224-004-1124-z}}, year = {{2004}}, } @inproceedings{16480, author = {{Leonardi, S. and Marchetti-Spaccamela, A. 
and Meyer auf der Heide, Friedhelm}}, booktitle = {{SPAA '04: Proceedings of the sixteenth annual ACM symposium on Parallelism in algorithms and architectures}}, isbn = {{1581138407}}, title = {{{Scheduling against an adversarial network}}}, doi = {{10.1145/1007912.1007936}}, year = {{2004}}, } @article{16399, abstract = {{We present a new data structure for rendering highly complex virtual environments of arbitrary topology. The special feature of our approach is that it allows an interactive navigation in very large scenes (30 GB/400 million polygons in our benchmark scenes) that cannot be stored in main memory, but only on a local or remote hard disk. Furthermore, it allows interactive rendering of substantially more complex scenes by instantiating objects. The sampling process is done in the preprocessing. There, the polygons are randomly distributed in our hierarchical data structure, the randomized sample tree. This tree only uses space that is linear in the number of polygons. In order to produce an approximate image of the scene, the tree is traversed and polygons stored in the visited nodes are rendered. During the interactive walkthrough, parts of the sample tree are loaded from local or remote hard disk. We implemented our algorithm in a prototypical walkthrough system. 
Analysis and experiments show that the quality of our images is comparable to images computed by the conventional z-buffer algorithm regardless of the scene topology.}}, author = {{Klein, Jan and Krokowski, Jens and Fischer, Matthias and Wand, Michael and Wanka, Rolf and Meyer auf der Heide, Friedhelm}}, issn = {{1054-7460}}, journal = {{Presence: Teleoperators and Virtual Environments}}, pages = {{617--637}}, title = {{{The Randomized Sample Tree: A Data Structure for Interactive Walk-Throughs in Externally Stored Virtual Environments}}}, doi = {{10.1162/1054746043280619}}, year = {{2004}}, } @inproceedings{13071, author = {{Liu Jing, Michelle and Ruehrup, Stefan and Schindelhauer, Christian and Volbert, Klaus and Dierkes, Martin and Bellgardt, Andreas and Ibers, Rüdiger and Hilleringmann, Ulrich}}, booktitle = {{{GOR/NGB Conference Tilburg 2004}}}, title = {{{Sensor Networks with More Features Using Less Hardware}}}, year = {{2004}}, } @article{19726, abstract = {{The Paderborn University BSP (PUB) library is a C communication library based on the BSP model. The basic library supports buffered as well as unbuffered non-blocking communication between any pair of processors and a mechanism for synchronizing the processors in a barrier style. In addition, PUB provides non-blocking collective communication operations on arbitrary subsets of processors, the ability to partition the processors into independent groups that execute asynchronously from each other, and a zero-cost synchronization mechanism. 
Furthermore, some techniques used in the implementation of the PUB library deviate significantly from the techniques used in other BSP libraries.}}, author = {{Bonorden, Olaf and Juurlink, Bernhardus and von Otte, Ingo and Rieping, Ingo}}, issn = {{0167-8191}}, journal = {{Parallel Computing}}, pages = {{187--207}}, title = {{{The Paderborn University BSP (PUB) library}}}, doi = {{10.1016/s0167-8191(02)00218-1}}, year = {{2003}}, } @article{19785, author = {{Salzwedel, Kay A.}}, isbn = {{9783540008835}}, issn = {{0302-9743}}, journal = {{Algorithms for Memory Hierarchies}}, title = {{{Algorithmic Approaches for Storage Networks}}}, doi = {{10.1007/3-540-36574-5_12}}, volume = {{2625}}, year = {{2003}}, } @inproceedings{19790, abstract = {{The advances in Internet technology have led to tremendous improvements in business, education, and science and have changed the way we think, live, and communicate. Information exchange has become ubiquitous by the possibilities offered through modern technologies. We are able to offer information 24 hours a day through our web sites and can leave messages every time and from anywhere in the world. This change in communication has led to new challenges. Enterprises have to deal with an information amount that doubles every year. The technological foundation to cope with this information explosion is given by Storage Area Networks (SANs), which are able to connect a great number of storage systems over a fast interconnection network. However, to be able to use the benefits of a SAN, an easy-to-use and efficient management support has to be given to the storage administrator. 
In this paper, we will suggest new storage management concepts and we will introduce a new management environment that is able to significantly reduce management costs and increase the performance and resource utilization of the given SAN infrastructure.}}, author = {{Scheideler, Christian and Salzwedel, Kay and Meyer auf der Heide, Friedhelm and Brinkmann, André and Vodisek, Mario and Rückert, Ulrich}}, booktitle = {{Proceedings of SSGRR 2003}}, title = {{{Storage Management as Means to cope with Exponential Information Growth}}}, year = {{2003}}, } @inproceedings{19806, abstract = {{We try to close the gap between theoretical investigations of wireless network topologies and realistic wireless environments. For point-to-point communication, we examine theoretically well-analyzed sparse graphs, i.e. the Yao-graph, the SparsY-graph, and the SymmY-graph. We present distributed algorithms that can be used to build up these graphs in time $O(\log n)$ per node without the use of any geographical positioning system. Our algorithms are based only on local knowledge and local decisions and make use of power control to establish communication links with low energy-cost. We compare these algorithms with respect to congestion, dilation, and energy. For congestion we introduce different measures that allow us to investigate the difference between real-world wireless networks and models for wireless communication at a high level of abstraction. For more realistic simulations we extend our simulation environment SAHNE. We use a realistic transmission model for directed communication that uses sector subdivision. 
Finally, our experimental results show that our topologies and algorithms work well in a distributed environment and we give some recommendations for the topology control based on our simulations.}}, author = {{Rührup, Stefan and Schindelhauer, Christian and Volbert, Klaus and Grünewald, Matthias}}, booktitle = {{Proceedings of the International Parallel and Distributed Processing Symposium (IPDPS)}}, isbn = {{0769519261}}, title = {{{Performance of distributed algorithms for topology control in wireless networks}}}, doi = {{10.1109/ipdps.2003.1213107}}, year = {{2003}}, } @misc{19828, author = {{Mahlmann, Peter}}, title = {{{Implementierung und Vergleich von Verfahren zum Information Retrieval im World Wide Web}}}, year = {{2003}}, } @inproceedings{19833, abstract = {{Communication facilities are important in Robotics if several robots have to work together. In this paper, we describe problems and solutions encountered while designing an infrared-based communication device for the mini robot Khepera. In contrast to traditional omnidirectional systems, it features directed, power-variable transmission in eight directions at 23.4 kbps up to a range of 1 m. It can differentiate incoming data signals from interference from adjacent sectors and can estimate their direction-of-arrival. We model the transmission over the infrared channel and show how interference influences the reception of the data signals. We also describe methods how to reduce these effects. We have tested the performance of the resulted signal processing in a worst case scenario by simulations and in experiments with a prototype implementation. 
The resulted module is especially suited for experimental evaluation of ad hoc network protocols and for position estimation.}}, author = {{Volbert, Klaus and Grünewald, Matthias and Schindelhauer, Christian and Rückert, Ulrich}}, booktitle = {{Proceedings of the 2nd International Conference on Autonomous Minirobots for Research and Edutainment}}, pages = {{113--122}}, title = {{{Directed power-variable infrared communication for the mini robot Khepera}}}, year = {{2003}}, } @inproceedings{19874, abstract = {{We present a novel framework for hierarchical collision detection that can be applied to virtually all bounding volume (BV) hierarchies. It allows an application to trade quality for speed. Our algorithm yields an estimation of the quality, so that applications can specify the desired quality. In a time-critical system, applications can specify the maximum time budget instead, and quantitatively assess the quality of the results returned by the collision detection afterwards.}}, author = {{Klein, Jan and Zachmann, Gabriel}}, booktitle = {{Proc. 8th International Fall Workshop Vision, Modeling, and Visualization (VMV 2003)}}, pages = {{37--45}}, title = {{{ADB-Trees: Controlling the Error of Time-Critical Collision Detection}}}, year = {{2003}}, } @inproceedings{19900, author = {{Klein, Jan and Zachmann, Gabriel}}, booktitle = {{Proc. ACM Symposium on Virtual Reality Software and Technology (VRST 2003)}}, pages = {{22--31}}, title = {{{Time-Critical Collision Detection Using an Average-Case Approach}}}, doi = {{10.1145/1008653.1008660}}, year = {{2003}}, } @inproceedings{19952, abstract = {{Graph minors theory, developed by Robertson \& Seymour, provides a list of powerful theoretical results and tools. However, the widespread opinion in Graph Algorithms community about this theory is that it is mainly of theoretical importance. 
The main purpose of this paper is to show how very deep min-max and duality theorems from Graph Minors can be used to obtain essential speed-up to many known algorithms on different domination problems.}}, author = {{Fomin, Fedor V. and Thilikos, Dimitrios M.}}, booktitle = {{Proceedings of the 14th ACM-SIAM Symposium on Discrete Algorithms (SODA 2003)}}, issn = {{0097-5397}}, title = {{{Dominating Sets in Planar Graphs: Branch-Width and Exponential Speed-Up}}}, doi = {{10.1137/s0097539702419649}}, year = {{2003}}, } @inproceedings{24273, author = {{Terbahl, Martina and Krokowski, Jens}}, booktitle = {{Proceedings of 5. GI-Informatiktage 2003}}, title = {{{Verteiltes Rendern durch dynamische Bildaufteilung}}}, year = {{2003}}, } @inproceedings{26263, author = {{Ziegler, Martin}}, booktitle = {{Proc. 5th Conference on Real Numbers and Computers (RNC5), INRIA}}, pages = {{47--64}}, title = {{{Stability versus Speed in a Computable Algebraic Model}}}, year = {{2003}}, } @inproceedings{26277, author = {{Ziegler, Martin}}, booktitle = {{Computability and Complexity in Analysis}}, pages = {{389--406}}, title = {{{Computable Operators on Regular Sets}}}, volume = {{302-8/2003}}, year = {{2003}}, } @inproceedings{2128, author = {{Damerow, Valentina and Meyer auf der Heide, Friedhelm and Räcke, Harald and Scheideler, Christian and Sohler, Christian}}, booktitle = {{ESA}}, pages = {{161--171}}, publisher = {{Springer}}, title = {{{Smoothed Motion Complexity}}}, doi = {{10.1007/978-3-540-39658-1_17}}, volume = {{2832}}, year = {{2003}}, } @inproceedings{2129, author = {{Awerbuch, Baruch and Brinkmann, André and Scheideler, Christian}}, booktitle = {{ICALP}}, pages = {{1153--1168}}, publisher = {{Springer}}, title = {{{Anycasting in Adversarial Systems: Routing and Admission Control}}}, volume = {{2719}}, year = {{2003}}, } @inproceedings{17423, author = {{Mueck, Bengt and Dangelmaier, Wilhelm and Fischer, Matthias}}, booktitle = {{15th European Simulation Symposium (ESS 
2003)}}, pages = {{367--371}}, publisher = {{SCS - Europe}}, title = {{{Components for the Active Support of the Analysis of Material Flow Simulations in a Virtual Environment}}}, year = {{2003}}, } @inproceedings{18791, abstract = {{We consider the problem of finding the weight of a Euclidean minimum spanning tree for a set of n points in ℝ^d. We focus on the situation when the input point set is supported by certain basic (and commonly used) geometric data structures that can provide efficient access to the input in a structured way. We present an algorithm that estimates with high probability the weight of a Euclidean minimum spanning tree of a set of points to within 1 + ε using only \~{O}(√n poly(1/ε)) queries for constant d. The algorithm assumes that the input is supported by a minimal bounding cube enclosing it, by orthogonal range queries, and by cone approximate nearest neighbors queries.}}, author = {{Magen, Avner and Ergun, Funda and Sohler, Christian and Rubinfeld, Ronitt and Czumaj, Artur and Newman, Ilan and Fortnow, Lance}}, booktitle = {{Proceedings of the 14th ACM-SIAM Symposium on Discrete Algorithms (SODA 2003)}}, isbn = {{0898715385}}, pages = {{813--822}}, title = {{{Sublinear Approximation of Euclidean Minimum Spanning Tree}}}, year = {{2003}}, } @inproceedings{18907, abstract = {{In a (randomized) oblivious routing scheme the path chosen for a request
between a source $s$ and a target $t$ is independent from the current traffic
in the network. Hence, such a scheme consists of probability distributions
over $s-t$ paths for every source-target pair $s,t$ in the network.

In a recent result [R02] it was shown that for any undirected network
there is an oblivious routing scheme that achieves a polylogarithmic
competitive ratio with respect to congestion. Subsequently, Azar et
al. [ACF+03] gave a polynomial time algorithm that for a given network
constructs the best oblivious routing scheme, i.e. the scheme that guarantees
the best possible competitive ratio.
Unfortunately, the latter result is based on the Ellipsoid algorithm; hence
it is impractical for large networks.

In this paper we present a combinatorial algorithm for constructing an
oblivious routing scheme that guarantees a competitive ratio of $O(\log^4 n)$
for undirected networks. Furthermore, our approach yields a proof
for the existence of an oblivious routing scheme with competitive ratio
$O(\log^3 n)$, which is much simpler than the original proof from [R02].}}, author = {{Bienkowski, Marcin and Korzeniowski, Miroslaw and Räcke, Harald}}, booktitle = {{Proceedings of the fifteenth annual ACM symposium on Parallel algorithms and architectures - SPAA '03}}, isbn = {{1581136617}}, title = {{{A practical algorithm for constructing oblivious routing schemes}}}, doi = {{10.1145/777412.777418}}, year = {{2003}}, } @inproceedings{18947, abstract = {{In this paper, we define a Petri net model for the network or routing layer of a mobile ad hoc network. Such networks require routing strategies substantially different from those used in static communication networks. The model presented consists of two layers, a location service and a position based routing. Both are described in detail. Our approach considers a very strong definition of fault tolerance thereby improving state-of-the-art ad hoc routing protocols in several respects. Modeling of the communication architecture for mobile ad hoc networks is part of our overall effort towards a design methodology for distributed embedded real-time systems including dynamically evolving components.}}, author = {{Rust, Carsten and Stappert, Friedhelm and Lukovszki, Tamás}}, booktitle = {{7th World Multiconference on Systemics, Cybernetics and Informatics}}, title = {{{A Petri Net Model for the Network Layer of a Mobile Ad Hoc Network Architecture}}}, year = {{2003}}, } @inproceedings{18960, abstract = {{We investigate distributed algorithms for mobile ad hoc networks for moving radio stations with adjustable transmission power in a worst case scenario. We consider two models to find a reasonable restriction on the worst-case mobility. In the pedestrian model we assume a maximum speed $v_{\max}$ of the radio stations, while in the vehicular model we assume a maximum acceleration $a_{\max}$ of the points. 
Our goal is to maintain persistent routes with nice communication network properties like hop-distance, energy-consumption, congestion and number of interferences. A route is persistent, if we can guarantee that all edges of this route can be upheld for a given time span $\Delta$, which is a parameter denoting the minimum time the mobile network needs to adopt changes, i.e. update routing tables, change directory entries, etc. This $\Delta$ can be used as the length of an update interval for a proactive routing scheme. We extend some known notions such as transmission range, interferences, spanner, power spanner and congestion to both mobility models and introduce a new parameter called crowdedness that states a lower bound on the number of radio interferences. Then we prove that a mobile spanner hosts a path system that polylogarithmically approximates the optimal congestion. We present distributed algorithms based on a grid clustering technique and a high-dimensional representation of the dynamical start situation which construct mobile spanners with low congestion, low interference number, low energy-consumption, and low degree. We measure the optimality of the output of our algorithm by comparing it with the optimal choice of persistent routes under the same circumstances with respect to pedestrian or vehicular worst-case movements. Finally, we present solutions for dynamic position information management under our mobility models.}}, author = {{Schindelhauer, Christian and Lukovszki, Tamás and Rührup, Stefan and Volbert, Klaus}}, booktitle = {{Proc. of the 15th ACM Symposium on Parallel Algorithms and Architectures (SPAA03)}}, isbn = {{1581136617}}, title = {{{Worst case mobility in ad hoc networks}}}, doi = {{10.1145/777412.777448}}, year = {{2003}}, } @inproceedings{18966, abstract = {{A recent seminal result of Räcke is that for any undirected network there is an oblivious routing algorithm with a polylogarithmic competitive ratio with respect to congestion. 
Unfortunately, Räcke's construction is not polynomial time. We give a polynomial time construction that guarantees Räcke's bounds, and more generally gives the true optimal ratio for any (undirected or directed) network.}}, author = {{Azar, Yossi and Cohen, Edith and Fiat, Amos and Kaplan, Haim and Räcke, Harald}}, booktitle = {{Proceedings of the thirty-fifth ACM symposium on Theory of computing - STOC '03}}, isbn = {{1581136749}}, title = {{{Optimal oblivious routing in polynomial time}}}, doi = {{10.1145/780542.780599}}, year = {{2003}}, } @misc{18982, abstract = {{In dieser Studienarbeit wurde ein System entworfen und implementiert, das der Ausführung paralleler Algorithmen nach dem Bulk-Synchronous Parallel (BSP)-Modell dient. Von der Paderborn University BSP Library (PUB) unterscheidet es sich dadurch, dass es vom Einsatzgebiet her nicht für Parallelrechner konzipiert ist, sondern vielmehr für eine Ansammlung von PCs und Workstations, die über das gesamte Internet verteilt sind.
Gegenüber anderen bekannten Web-Computing Projekten wie z.B. SETI@home oder distributed.net zeichnet sich dieses System dadurch aus, dass nicht Clients von einem zentralen Server "häppchenweise" unabhängige Teilprobleme anfordern und lösen, sondern dass die Clients gemeinsam an einem Problem arbeiten, indem sie nach dem BSP-Modell miteinander kommunizieren und sich synchronisieren.}}, author = {{Gehweiler, Joachim}}, title = {{{Entwurf und Implementierung einer Laufzeitumgebung für parallele Algorithmen in Java}}}, year = {{2003}}, } @article{20435, author = {{Hamann, Heiko}}, journal = {{Complex Systems}}, number = {{3}}, pages = {{263--268}}, title = {{{Definition and Behavior of Langton's Ant in Three Dimensions}}}, volume = {{14}}, year = {{2003}}, } @inproceedings{18196, abstract = {{Fast algorithms for arithmetic on real or complex polynomials are well-known and have proven to be not only asymptotically efficient but also very practical. Based on FAST FOURIER TRANSFORM, they for instance multiply two polynomials of degree up to N or multi-evaluate one at N points simultaneously within quasi-linear time O(N polylog N). An extension to (and in fact the mere definition of) polynomials over fields R and C to the SKEW-field H of quaternions is promising but still missing. The present work proposes three approaches which in the commutative case coincide but for H turn out to differ, each one satisfying some desirable properties while lacking others. For each notion, we devise algorithms for according arithmetic; these are quasi-optimal in that their running times match lower complexity bounds up to polylogarithmic factors.}}, author = {{Ziegler, Martin}}, booktitle = {{Proc. 
14th Annual International Symposium on Algorithms and Computation (ISAAC'03)}}, isbn = {{9783540206958}}, issn = {{0302-9743}}, pages = {{705--715}}, title = {{{Quasi-optimal Arithmetic for Quaternion Polynomials}}}, doi = {{10.1007/978-3-540-24587-2_72}}, year = {{2003}}, } @inbook{18258, abstract = {{Multi-evaluation of the Coulomb potential induced by N particles is a central part of N-body simulations. In 3D, known subquadratic time algorithms return approximations up to given ABSOLUTE precision. By combining data structures from Computational Geometry with fast polynomial arithmetic, the present work obtains approximations of prescribable RELATIVE error e>0 in time O(1/e*N*polylog N).}}, author = {{Ziegler, Martin}}, booktitle = {{Lecture Notes in Computer Science}}, editor = {{Dehne, F. and Sack, JR. and Smid, M.}}, isbn = {{9783540405450}}, issn = {{0302-9743}}, publisher = {{Springer}}, title = {{{Fast Relative Approximation of Potential Fields}}}, doi = {{10.1007/978-3-540-45078-8_13}}, volume = {{2748}}, year = {{2003}}, } @inproceedings{18367, abstract = {{Unternehmen operieren zunehmend in einem schwierigen Umfeld: Die Innovationsdynamik nimmt zu; die Produktlebenszyklen werden kürzer; gleichzeitig werden die Produkte komplexer; der harte Wettbewerb zwingt die Unternehmen, auf Marktveränderungen zu reagieren. Aus dieser Entwicklung resultieren hohe Anforderungen an die Gestaltung der Fertigungsprozesse. Im Wesentlichen kommt es darauf an, die Fertigungsprozesse möglichst rasch an die neuen Gegebenheiten anzupassen, bzw. neue Fertigungsprozesse so zu planen, dass sie auf Anhieb die erforderlichen Resultate bringen. Ein wichtiges Mittel hierfür der Einsatz von Materialflusssimulationen. Hierzu ist zunächst die Erstellung eines Simulationsmodells notwendig. Dafür wird in einem ersten Schritt das zu betrachtende System analysiert und ein rechnerinternes Modell erzeugt. 
Dieses beinhaltet die Modellierung von Funktionen, Prozessen, Verhaltensweisen oder Regeln, die im Modell die tatsächlichen Wirkzusammenhänge im Unternehmen widerspiegeln sollen. Die so modellierten Aspekte sind untereinander so vernetzt, dass alle Funktionen des Modells ein Ganzes ergeben. Für viele Fragenstellungen werden umfangreiche Modelle mit einem komplexen Verhalten benötigt. Andererseits steigt mit zunehmender Größe und Komplexität des Simulationsmodells auch der Modellierungsaufwand, die Fehleranfälligkeit, die Laufzeit und der Interpretationsaufwand bei der Ergebnisauswertung. Fehler bei der Modellbildung führen bei der Simulation zu Fehlinterpretationen und falschen Ergebnissen. Einen wesentlichen Anteil daran hat die Gestaltung der Benutzungsschnittstelle: Das übliche, wenig intuitive WIMP-Interface (Windows, Icons, Mouse, Pointer) erfordert sehr gut geschulte Benutzer, sodass die Erzeugung der meist komplexen Simulationsmodelle mit großen Zeitaufwand verbunden ist. Die Präsentation der Simulationsergebnisse erfolgt in Form von Wertetabellen und zweidimensionalen, abstrakten Darstellungen des Fertigungssystems. Für die Simulationsexperten erscheint dies ausreichend, für ein aus verschiedenen Bereichen und Disziplinen zusammengesetztes Planungsteam ist das aber nicht akzeptabel. So können Fehlinterpretationen aufgrund der unklaren Darstellungen auftreten. Durch eine durchgängige Unterstützung von der Modellierung über die Ausführung bis zur Analyse von Simulationen durch Augmented-Reality und Virtual-Reality werden viele dieser Probleme überwunden aber viele neue Probleme entstehen. Marktgängige Simulatoren unterstützen zwar z.T. schon Virtual Reality; eine durchgängige Simulationsunterstützung wird aber in der Virtuellen Umgebung nicht geboten. Argumented Reality-Komponenten sind bisher nicht bekannt. In diesem Artikel werden nach einer Analyse der benötigten Technologien die Nutzenpotentiale insb. durch den Einsatz von AR ausgelotet. 
}}, author = {{Fischer, Matthias and Grafe, Michael and Matysczok, Carsten and Mueck, Bengt and Schoo, Michael}}, booktitle = {{Human Aspects in Production Management - Proceedings of the IFIP WG 5.7 Working Conference on Human Aspects in Production Management}}, pages = {{170--177}}, publisher = {{Shaker Verlag}}, title = {{{Virtual and Augmented Reality Support for Discrete Manufacturing System Simulation}}}, volume = {{5}}, year = {{2003}}, } @inproceedings{18372, abstract = {{Simulation und Visualisierung sind anerkannte Mittel zum Verstehen und Analysieren von Fertigungsprozessen. In Visualisierungen von Fertigungsprozessen können Betrachter frei und ungeleitet umherwandern. Erkenntnisse werden so aber eher zufällig erworben. Dieser Artikel skizziert ein System und Methoden, die den Betrachter unterstützen auf auffällige/signifikante Prozesse/ Punkte in Materialflusssimulationen aufmerksam zu werden und diese zu entschärfen. Es wird der Entwurf eines Werkzeugs beschrieben, dass den Betrachter einer Simulation die Möglichkeit bietet, signifikante Produktionsprozesse interaktiv zu verbessern. Der Benutzer wird sich in einer virtuellen 3D-Umgebung (Walkthrough-System) bewegen können und automatisch ermittelte Indizien für signifikante Abläufe erhalten. Zugleich soll die Simulation signifikante Objekte genauer simulieren. Bekundet der Benutzer Interesse an einem signifikanten Prozess, wird er automatisch zu dem jeweiligen Ort geführt werden und dort durch Eingriffe in die Simulation die kritische Situation experimentell untersuchen können. Da der kritische Moment in der Vergangenheit liegt und somit vom Betrachter schon verpasst ist, wird es dem Betrachter möglich sein, die Simulation auf einen Zeitpunkt vor dem Eintreten zurück zu setzen. Die virtuelle Szene (3D-Grafik-Modelle) einer typischen dynamischen Simulationsumgebung ist in der Regel zu komplex, um sie in Echtzeit in einem Walkthrough-System zu visualisieren und darzustellen. 
Typischerweise werden Approximationsverfahren eingesetzt, um die Komplexität zu reduzieren und ein flüssiges Navigieren des Betrachters zu erlauben. Durch spezifische Simulations-Anforderungen ist bekannt, an welchen Objekten des Simulationsmodells Probleme auftreten; sie sind für den Betrachter wichtig. Die zugehörigen virtuellen 3D-Repräsentanten, können von den Approximationsalgorithmen mit einer besonders hohen Darstellungsqualität dargestellt werden und die übrigen Teile der virtuellen Szene entsprechend vernachlässigt werden. Solche Approximationsalgorithmen und Datenstrukturen nutzen die spezifischen Eigenschaften virtueller Simulationsumgebungen aus, um eine hohe Darstellungsqualität und Darstellungsperformance zu erreichen. }}, author = {{Dangelmaier, Wilhelm and Franke, Werner and Mueck, Bengt and Fischer, Matthias}}, booktitle = {{2. Paderborner Workshop Augmented & Virtual Reality in der Produktentstehung}}, pages = {{141--151}}, title = {{{Komponenten zur aktiven Unterstützung der Analyse von Materialflusssimulationen in virtuellen Umgebungen}}}, volume = {{123}}, year = {{2003}}, } @inproceedings{18374, abstract = {{In der heutigen Zeit operieren Unternehmen zunehmend in einem schwierigen Umfeld: Die Innovationsdynamik nimmt zu und die Produktlebenszyklen werden kürzer. Daraus resultieren hohe Anforderungen an die Planung von Fertigungssysteme. Um diesen Prozess zu unterstützen, sollen die Technologien Augmented Reality und Virtual Reality in einem integrierten System genutzt werden. Dieses System unterstützt den Anwender bei der Modellbildung, der Validierung des Simulationsmodells sowie der folgenden Optimierung des Fertigungssystems. Durch die Entwicklung geeigneter Kopplungs- bzw. Integrationsmechanismen wird eine durchgängige Nutzung der Technologien AR, VR und Simulation realisiert. 
Die Visualisierung der anfallenden 3D-Daten innerhalb der VR- und ARUmgebungen erfolgt mittels einer 3D-Renderinglibrary, die es durch den Einsatz von neuen entwickelten Verfahren ermöglicht, die verwendeten 3D-Modelle weitgehend automatisiert aus unternehmensinternen 3D-CAD-Modellen zu generieren.}}, author = {{Fischer, Matthias and Grafe, Michael and Matysczok, Carsten and Schoo, Michael and Mueck, Bengt}}, booktitle = {{2. Paderborner Workshop Augmented & Virtual Reality in der Produktentstehung}}, pages = {{153--166}}, publisher = {{Verlagsschriftenreihe des Heinz Nixdorf Instituts, Paderborn}}, title = {{{Planung von komplexen Fertigungssystemen durch Einsatz einer VR/AR-unterstützten Simulation}}}, volume = {{123}}, year = {{2003}}, } @article{18567, author = {{Adler, Micah and Vöcking, Berthold and Sohler, Christian and Räcke, Harald and Sivadasan, Naveen}}, journal = {{Combinatorics, Probability & Computing}}, pages = {{225--244}}, title = {{{Randomized Pursuit-Evasion in Graphs}}}, year = {{2003}}, } @phdthesis{18573, author = {{Sohler, Christian}}, isbn = {{3-935433-28-X}}, publisher = {{Verlagsschriftenreihe des Heinz Nixdorf Instituts, Paderborn}}, title = {{{Property Testing and Geometry}}}, volume = {{119}}, year = {{2003}}, } @article{16481, abstract = {{ZusammenfassungVernetzte Systeme sind zu unverzichtbaren Bestandteilen unseres Umfelds geworden, zum Beispiel als Höchstleistungsrechner, als Kommunikations- und Informationssysteme oder als Planungs- und Steuerungskomponenten von Transport- und Produktionssystemen. Die ständig wachsende Komplexität solcher Systeme stellt Informatiker und Ingenieure vor immer neue Herausforderungen. In diesem Beitrag beschreibe ich die Zielsetzungen und die Struktur des SFB 376 Massive Parallelität: Algorithmen – Entwurfsmethoden – Anwendungen. 
Als Beispiel für unsere Arbeiten beschreibe ich einen algorithmisch orientierten Forschungszweig, in dem wir, ausgehend von theoretischen Problemen über effiziente Simulationen zwischen parallelen Rechenmodellen, Methoden, Techniken und Implementierungen entwickelt haben, die zu produktnahen Prototypen für die Speichervirtualisierung in verteilten Datenservern führen.}}, author = {{Meyer auf der Heide, Friedhelm}}, issn = {{2196-7032}}, journal = {{it - Information Technology}}, title = {{{Sonderforschungsbereich 376 Massive Parallelität: Algorithmen – Entwurfsmethoden – Anwendungen (Massively Parallel Computing: Algorithms – Design Methods – Applications)}}}, doi = {{10.1524/itit.45.2.108.19606}}, year = {{2003}}, } @article{16482, author = {{Juurlink, Bernhardus and Kolman, Petr and Meyer auf der Heide, Friedhelm and Rieping, Ingo}}, issn = {{1570-8667}}, journal = {{Journal of Discrete Algorithms}}, pages = {{151--166}}, title = {{{Optimal broadcast on parallel locality models}}}, doi = {{10.1016/s1570-8667(03)00023-6}}, year = {{2003}}, } @proceedings{16484, editor = {{Rosenberg, Arnold L. and Meyer auf der Heide, Friedhelm}}, isbn = {{1581136617}}, title = {{{Proceedings of the fifteenth annual ACM symposium on Parallel algorithms and architectures - SPAA '03}}}, doi = {{10.1145/777412}}, year = {{2003}}, } @inproceedings{16720, author = {{Bonorden, Olaf and Bruls, N. and Kastens, U. and Le, D. K. and Meyer auf der Heide, Friedhelm and Niemann, J.-C. and Porrmann, M. and Rückert, U. and Slowik, A. and Thies, M.}}, booktitle = {{28th Annual IEEE International Conference on Local Computer Networks}}, title = {{{A holistic methodology for network processor design}}}, doi = {{10.1109/LCN.2003.1243185}}, year = {{2003}}, } @inproceedings{19727, author = {{Bonorden, Olaf and Meyer auf der Heide, Friedhelm and Wanka, Rolf}}, booktitle = {{Int. Conf. 
on Parallel and Distributed Processing Techniques and Applications (PDPTA)}}, pages = {{2202--2208}}, title = {{{Composition of Efficient Nested BSP Algorithms: Minimum Spanning Tree Computation as an Instructive Example}}}, year = {{2002}}, } @inproceedings{19850, author = {{Wanka, Rolf}}, booktitle = {{Proc. Workshop on Graph-Theoretic Concepts in Computer Science (WG)}}, isbn = {{9783540003311}}, issn = {{0302-9743}}, pages = {{413--420}}, title = {{{Any Load-Balancing Regimen for Evolving Tree Computations on Circulant Graphs Is Asymptotically Optimal}}}, doi = {{10.1007/3-540-36379-3_36}}, year = {{2002}}, } @inproceedings{19873, abstract = {{We present a new and easy to use framework for navigating through scenes of arbitrary complexity and topology. In the preprocessing, images for discrete viewpoints and viewing directions are rendered and stored on an external volume. During navigation each image can be displayed within a very short time by loading it from the volume. For acceleration, our prefetching strategy loads possibly needed images for the next few frames if the viewer takes a break. The measurements show that we achieve interactive frame rates, whereby the difference between the minimal and maximal display time is very small. Our system works well with scenes modelled by polygons, but also digital photos can easily be used for describing a 3D scene.}}, author = {{Klein, Jan and Krokowski, Jens and Cuntz, Nicolas}}, booktitle = {{Proc. of 4. GI-Informatiktage}}, pages = {{224--229}}, title = {{{Realtime Navigation in Highly Complex 3D-Scenes Using JPEG Compression}}}, year = {{2002}}, } @article{24336, abstract = {{We define here a distributed abstract state machine (DASM) [7] of the network or routing layer of mobile ad hoc networks [13]. 
Such networks require routing strategies substantially different from those used in static communication networks, since storing and updating large routing tables at mobile hosts would congest the network with administration packets very fast. In [1], the hypercubic location service is presented, which considers a very strong definition of fault-tolerance thereby improving state-of-the-art ad hoc routing protocols in several respects. Our goal in modeling the protocols for the distributed location service and the position based routing is twofold. First, we support the definition and validation of wireless communication protocols and implementations based thereon. Second, we feel that the abstract computation model naturally reflects the layering principle of communication architectures in combination with an uncompromisingly local view of the application domain. Thus we can identify fundamental semantic concepts, such as concurrency, reactivity and asynchronism, directly with the related concepts as imposed by the given application context. }}, author = {{Benczúr, András and Glässer, Uwe and Lukovszki, Tamás}}, journal = {{Proc. 
of 10th International Workshop on Abstract State Machines, LNCS}}, title = {{{Formal Description of a Distributed Location Service for Mobile Ad Hoc Networks}}}, year = {{2002}}, } @inproceedings{24338, author = {{Grünewald, Matthias and Lukovszki, Tamás and Schindelhauer, Christian and Volbert, Klaus}}, booktitle = {{Proceedings of the 8th International Euro-Par Conference}}, issn = {{0302-9743}}, title = {{{Distributed Maintenance of Resource Efficient Wireless Network Topologies}}}, doi = {{10.1007/3-540-45706-2_134}}, year = {{2002}}, } @inproceedings{26412, author = {{Volbert, Klaus}}, booktitle = {{Proceedings 10th Euromicro Workshop on Parallel, Distributed and Network-based Processing}}, title = {{{A simulation environment for ad hoc networks using sector subdivision}}}, doi = {{10.1109/empdp.2002.994324}}, year = {{2002}}, } @inproceedings{2136, author = {{Brinkmann, André and Salzwedel, Kay and Scheideler, Christian}}, booktitle = {{SPAA}}, pages = {{53--62}}, title = {{{Compact, adaptive placement schemes for non-uniform requirements}}}, year = {{2002}}, } @article{18853, author = {{Sohler, Christian and Czumaj, Artur}}, journal = {{Proceedings of the 43rd Symposium on Foundations of Computer Science (FOCS)}}, pages = {{83--92}}, title = {{{Abstract Combinatorial Programs and Efficient Property Testers}}}, year = {{2002}}, } @techreport{18961, author = {{Lukovszki, Tamás and Benczúr, A.}}, title = {{{A Degree O(log log n) Fault Tolerant Distributed Location Service for Geographic Ad-Hoc Routing}}}, year = {{2002}}, } @phdthesis{18169, abstract = {{Die Implementierung von Algorithmen zur Lösung geometrischer Probleme im Euklidischen Raum (z.B. Berechnung der konvexen Hülle oder des Durchschnitts zweier Polyeder) stellt sich oftmals als hochgradig nichttrivial heraus. 
Ob und unter welchen Voraussetzungen die verursachenden numerischen Instabilitäten überhaupt in den Griff zu kriegen oder vielmehr dem Problem inhärent sind, untersucht diese Arbeit in einem auf Turing zurückgehenden Rechenmodell. Im Gegensatz zu algebraischen Ansätzen geht jenes nicht von der Verfügbarkeit exakter Tests auf z.B. Gleichheit reeller Zahlen aus, sondern berücksichtigt die auf Digitalcomputern tatsächlich realisierbare Approximation durch rationale Zahlen. In diesem Rahmen werden beweisbar stabile Algorithmen zum Lösen linearer Gleichungssysteme, zur Matrix-Diagonalisierung und zur linearen wie nichtlinearen Optimierung präsentiert. Als wichtiges technisches Hilfsmittel dient ein neuer Berechenbarkeitsbegriff für reguläre unendliche Mengen reeller Zahlen, der sich aus dem systematischen Vergleich verschiedener der Literatur entnommener ad-hoc Ansätze ergibt.}}, author = {{Ziegler, Martin}}, isbn = {{3-935433-24-7}}, publisher = {{Verlagsschriftenreihe des Heinz Nixdorf Instituts, Paderborn}}, title = {{{Zur Berechenbarkeit reeller geometrischer Probleme}}}, volume = {{115}}, year = {{2002}}, } @article{18176, author = {{Ziegler, Martin}}, issn = {{0942-5616}}, journal = {{Mathematical Logic Quarterly (MLQ)}}, number = {{S1}}, pages = {{157--181}}, title = {{{Computability on Regular Subsets of Euclidean Space}}}, doi = {{10.1002/1521-3870(200210)48:1+<157::aid-malq157>3.0.co;2-4}}, volume = {{48}}, year = {{2002}}, } @inproceedings{18177, abstract = {{Consider the classical point location problem: for a fixed arrangement of m hyperplanes and its induced partition of d-space report, upon input of some point, which face it lies in. With sufficient memory, this is easy to solve in logarithmic time O(log m). But how fast can algorithms (formalized as Linear Decision Trees) of *minimum* size be? The present work gives lower and upper bounds for the time complexity of point location under this constraint. 
They show that, in addition to m, the maximum number w of walls of a cell turns out to be a crucial parameter. We also consider a relaxation of the strict minimum-size condition allowing for constant factor overhead.}}, author = {{Ziegler, Martin and Damerow, Valentina and Finschi, Lukas}}, booktitle = {{Proceedings of the 14th Canadian Conference on Computational Geometry (CCCG'02)}}, title = {{{Point Location Algorithms of Minimum Size}}}, year = {{2002}}, } @inproceedings{18179, abstract = {{Do the solutions of linear equations depend computably on their coefficients? Implicitly, this has been one of the central questions in linear algebra since the very beginning of the subject and the famous Gauß algorithm is one of its numerical answers. Today there exists a tremendous number of algorithms which solve this problem for different types of linear equations. However, actual implementations in floating point arithmetic keep exhibiting numerical instabilities for ill-conditioned inputs. This situation raises the question which of these instabilities are intrinsic, thus caused by the very nature of the problem, and which are just side effects of specific algorithms. To approach this principle question we revisit linear equations from the rigorous point of view of computability. Therefore we apply methods of computable analysis, which is the Turing machine based theory of computable real number functions. It turns out that, given the coefficients of a system of linear equations, we can compute the space of solutions, if and only if the dimension of the solution space is known in advance. 
Especially, this explains why there cannot exist any stable algorithms under weaker assumptions.}}, author = {{Brattka, Vasco and Ziegler, Martin}}, booktitle = {{Proceedings of the 2nd IFIP International Conference on Theoretical Computer Science}}, pages = {{95--106}}, title = {{{Computability of Linear Equations}}}, doi = {{10.1007/978-0-387-35608-2_9}}, year = {{2002}}, } @inproceedings{18369, abstract = {{Visualising is a method used to help experiencing and understanding causal cohesions in simulation processes. For this purpose, tools for visualising are already implemented in prevalent simulation systems. The user creates his simulation model and generates a 3-dimensional (2,5-dimensional) visualising by means of the simulation system. This helps examining the process which makes it easier for the viewer to “understand” it. Simulation tools usually only provide the opportunity for a unidirectional visualising. In a 3-dimensional surrounding the viewer can not implement an interaction with the simulation while the system is running. Though an interaction during the simulation run enables the user to gain a better understanding of causal cohesions. Solutions via HLA are sophisticated and therefore rather suited for extensive projects. We present a distributed system consisting of a commercial manufacturing simulation tool, a coupling module and a walkthrough system. The distributed system in conjunctions with the coupling module guarantees generality and a wide field of applications of the walkthrough system. Further it guarantees flexibility and selection of the specialized graphics hardware for the walkthrough system. A further contribution of this paper is the solution of the time synchronisation problem caused by simulation tool and walkthrough system. 
}}, author = {{Mueck, Bengt and Dangelmaier, Wilhelm and Fischer, Matthias and Klemisch, Wolfram}}, booktitle = {{Simulation und Visualisierung}}, pages = {{71--84}}, publisher = {{SCS European Publishing House}}, title = {{{Bi-directional Coupling of Simulation Tools with a Walkthrough-System}}}, year = {{2002}}, } @inproceedings{18566, abstract = {{We analyze a randomized pursuit-evasion game on graphs. This game is played by two players, a hunter and a rabbit. Let G be any connected, undirected graph with n nodes. The game is played in rounds and in each round both the hunter and the rabbit are located at a node of the graph. Between rounds both the hunter and the rabbit can stay at the current node or move to another node. The hunter is assumed to be restricted to the graph G: in every round, the hunter can move using at most one edge. For the rabbit we investigate two models: in one model the rabbit is restricted to the same graph as the hunter, and in the other model the rabbit is unrestricted, i.e., it can jump to an arbitrary node in every round. We say that the rabbit is caught as soon as hunter and rabbit are located at the same node in a round. The goal of the hunter is to catch the rabbit in as few rounds as possible, whereas the rabbit aims to maximize the number of rounds until it is caught. Given a randomized hunter strategy for G, the escape length for that strategy is the worst case expected number of rounds it takes the hunter to catch the rabbit, where the worst case is with regards to all (possibly randomized) rabbit strategies. Our main result is a hunter strategy for general graphs with an escape length of only O (n log (diam(G))) against restricted as well as unrestricted rabbits. This bound is close to optimal since Ω(n) is a trivial lower bound on the escape length in both models. 
Furthermore, we prove that our upper bound is optimal up to constant factors against unrestricted rabbits.}}, author = {{Adler, Micah and Räcke, Harald and Sivadasan, Naveen and Sohler, Christian and Vöcking, Berthold}}, booktitle = {{Proceedings of the 29th International Colloquium on Automata, Languages and Programming}}, isbn = {{9783540438649}}, issn = {{0302-9743}}, title = {{{Randomized Pursuit-Evasion in Graphs}}}, doi = {{10.1007/3-540-45465-9_77}}, year = {{2002}}, } @article{16489, author = {{Krick, Christof and Meyer auf der Heide, Friedhelm and Räcke, Harald and Vöcking, Berthold and Westermann, Matthias}}, issn = {{1432-4350}}, journal = {{Theory of Computing Systems}}, pages = {{217--245}}, title = {{{Data Management in Networks: Experimental Evaluation of a Provably Good Strategy}}}, doi = {{10.1007/s00224-001-1045-z}}, year = {{2002}}, } @inproceedings{16490, abstract = {{We present a new data structure for rendering highly complex virtual environments of arbitrary topology. The special feature of our approach is that it allows an interactive navigation in very large scenes (30 GB/400 million polygons in our benchmark scenes) that cannot be stored in main memory, but only on a local or remote hard disk. Furthermore, it allows interactive rendering of substantially more complex scenes by instantiating objects. For the computation of an approximate image of the scene, a sampling technique is used. In the preprocessing, a so-called sample tree is built whose nodes contain randomly selected polygons from the scene. This tree only uses space that is linear in the number of polygons. In order to produce an image of the scene, the tree is traversed and polygons stored in the visited nodes are rendered. During the interactive walkthrough, parts of the sample tree are loaded from local or remote hard disk. We implemented our algorithm in a prototypical walkthrough system. 
Analysis and experiments show that the quality of our images is comparable to images computed by the conventional z-buffer algorithm regardless of the scene topology.}}, author = {{Klein, Jan and Krokowski, Jens and Fischer, Matthias and Wand, Michael and Wanka, Rolf and Meyer auf der Heide, Friedhelm}}, booktitle = {{Proceedings of the ACM symposium on Virtual reality software and technology - VRST '02}}, isbn = {{1581135300}}, title = {{{The randomized sample tree: a data structure for interactive walkthroughs in externally stored virtual environments}}}, doi = {{10.1145/585740.585764}}, year = {{2002}}, } @inproceedings{16491, author = {{Meyer auf der Heide, Friedhelm and Schindelhauer, Christian and Volbert, Klaus and Grünewald, Matthias}}, booktitle = {{Proceedings of the fourteenth annual ACM symposium on Parallel algorithms and architectures - SPAA '02}}, isbn = {{1581135297}}, title = {{{Energy, congestion and dilation in radio networks}}}, doi = {{10.1145/564870.564910}}, year = {{2002}}, } @inbook{16723, author = {{Meyer auf der Heide, Friedhelm and Kumar, Mohan and Nikoletseas, Sotiris and Spirakis, Paul}}, booktitle = {{Euro-Par 2002 Parallel Processing}}, isbn = {{9783540440499}}, issn = {{0302-9743}}, title = {{{Mobile Computing, Mobile Networks}}}, doi = {{10.1007/3-540-45706-2_133}}, year = {{2002}}, } @phdthesis{19622, author = {{Schröder, Klaus}}, isbn = {{3-931466-88-4}}, publisher = {{Verlagsschriftenreihe des Heinz Nixdorf Instituts, Paderborn}}, title = {{{Balls into Bins: A Paradigm for Job Allocation, Data Distribution Processes, and Routing}}}, volume = {{89}}, year = {{2001}}, } @inproceedings{19797, author = {{Salzwedel, Kay and Hartmann, Georg and Wolff, Carsten and Preis, Robert}}, booktitle = {{Proceedings of the PDPTA 2001}}, pages = {{463--470}}, title = {{{Efficient Parallel Simulations of Pulse-Coded Neural Networks (PCNN)}}}, volume = {{1}}, year = {{2001}}, } @article{2139, author = {{Meyer auf der Heide, Friedhelm and 
Scheideler, Christian}}, journal = {{Combinatorica}}, number = {{1}}, pages = {{95--138}}, title = {{{Deterministic Routing With Bounded Buffers: Turning Offline Into Online Protocols}}}, doi = {{10.1007/s004930170007}}, volume = {{21}}, year = {{2001}}, } @inproceedings{2141, author = {{Berenbrink, Petra and Brinkmann, André and Scheideler, Christian}}, booktitle = {{PDP}}, pages = {{227--234}}, publisher = {{IEEE Computer Society}}, title = {{{SIMLAB-A Simulation Environment for Storage Area Networks}}}, year = {{2001}}, } @article{18749, author = {{Czumaj, Artur and Sohler, Christian}}, isbn = {{9783540422877}}, issn = {{0302-9743}}, journal = {{Proceedings of the 28th International Colloquium on Automata, Languages and Programming (ICALP)}}, pages = {{493--505}}, title = {{{Testing Hypergraph Coloring}}}, doi = {{10.1007/3-540-48224-5_41}}, year = {{2001}}, } @inproceedings{18750, author = {{Sohler, Christian and Czumaj, Artur}}, booktitle = {{Proceedings of the 12th ACM-SIAM Symposium on Discrete Algorithms}}, pages = {{865--872}}, title = {{{Soft Kinetic Data Structures}}}, year = {{2001}}, } @article{18857, abstract = {{This paper investigates geometric problems in the context of property testing algorithms. Property testing is an emerging area in computer science in which one is aiming at verifying whether a given object has a predetermined property or is “far” from any object having the property. Although there has been some research previously done in testing geometric properties, prior works have been mostly dealing with the study of combinatorial notion of the distance defining whether an object is “far” or it is “close”; very little research has been done for geometric notion of distance measures, that is, distance measures that are based on the geometry underlying input objects. The main objective of this work is to develop sound models to study geometric problems in the context of property testing. 
Comparing to the previous work in property testing, there are two novel aspects developed in this paper: geometric measures of being close to an object having the predetermined property, and the use of geometric data structures as basic primitives to design the testers. We believe that the second aspect is of special importance in the context of property testing and that the use of specialized data structures as basic primitives in the testers can be applied to other important problems in this area. We shall discuss a number of models that in our opinion fit best geometric problems and apply them to study geometric properties for three very fundamental and representative problems in the area: testing convex position, testing map labeling, and testing clusterability.}}, author = {{Sohler, Christian and Czumaj, Artur}}, journal = {{Proceedings of the 9th Annual European Symposium on Algorithms (ESA`01)}}, pages = {{266--277}}, title = {{{Property Testing with Geometric Queries}}}, doi = {{10.1007/3-540-44676-1_22}}, year = {{2001}}, } @inproceedings{18964, author = {{Lukovszki, Tamás and Maheshwari, Anil and Zeh, Norbert}}, booktitle = {{Proceedings of the 21st Annual Conference on Foundations of Software Technology and Theoretical Computer Science (FSTTCS 2001), LNCS}}, isbn = {{9783540430025}}, issn = {{0302-9743}}, title = {{{I/O-Efficient Batched Range Counting and Its Applications to Proximity Problems}}}, doi = {{10.1007/3-540-45294-x_21}}, year = {{2001}}, } @article{23731, abstract = {{ On 22 May 2000, the factorization of a pseudorandom polynomial of degree 1 048 543 over the binary field Z 2 was completed on a 4-processor Linux PC, using roughly 100 CPU-hours. The basic approach is a combination of the factorization software BIPOLAR and a parallel version of Cantor's multiplication algorithm. The PUB-library (Paderborn University BSP library) is used for the implementation of the parallel communication. 
}}, author = {{Bonorden, Olaf and von zur Gathen, Joachim and Gerhard, Jürgen and Müller, Olaf}}, issn = {{0163-5824}}, journal = {{ACM SIGSAM Bulletin}}, pages = {{16--18}}, title = {{{Factoring a binary polynomial of degree over one million}}}, doi = {{10.1145/504331.504333}}, year = {{2001}}, } @inproceedings{18152, abstract = {{Computing the spectral decomposition of a normal matrix is among the most frequent tasks to numerical mathematics. A vast range of methods are employed to do so, but all of them suffer from instabilities when applied to degenerate matrices, i.e., those having multiple eigenvalues. We investigate the spectral representation's effectivity properties on the sound formal basis of computable analysis. It turns out that in general the eigenvectors cannot be computed from a given matrix. If however the size of the matrix' spectrum (=number of different eigenvalues) is known in advance, it can be diagonalized effectively. Thus, in principle the spectral decomposition can be computed under remarkably weak non-degeneracy conditions.}}, author = {{Ziegler, Martin and Brattka, Vasco}}, booktitle = {{Proceedings of the 4th Workshop on Computability and Complexity in Analysis (CCA'2000)}}, isbn = {{9783540421979}}, issn = {{0302-9743}}, pages = {{378--388}}, title = {{{A Computable Spectral Theorem}}}, doi = {{10.1007/3-540-45335-0_23}}, volume = {{2064}}, year = {{2001}}, } @inproceedings{18166, abstract = {{What is the maximum number of edges of the d-dimensional hypercube, denoted by S(d,k), that can be sliced by k many hyperplanes? This question on combinatorial properties of Euclidean geometry arising from linear separability considerations in the theory of Perceptrons has become an issue on its own. We use computational and combinatorial methods to obtain new bounds on S(d,k), s<=8. These strengthen earlier results on hypercube cut numbers.}}, author = {{Ziegler, Martin and Emamy-Khansari, M. 
Reza}}, booktitle = {{Proceedings of the First International Conference on Discrete Models - Combinatorics, Computation and Geometry (DM-CCG'2001)}}, pages = {{155--164}}, title = {{{New Bounds for Hypercube Slicing Numbers}}}, volume = {{AA}}, year = {{2001}}, } @inproceedings{18168, abstract = {{We consider the classical LINEAR OPTIMIZATION Problem, but in the Turing rather than the RealRAM model. Asking for mere computability of a function's maximum over some closed domain, we show that the common presumptions 'full-dimensional' and `bounded' in fact cannot be omitted: The sound framework of Recursive Analysis enables us to rigorously prove this folkloristic observation! On the other hand, convexity of this domain may be weakened to connectedness, and even NON-linear functions turn out to be effectively optimizable.}}, author = {{Brattka, Vasco and Ziegler, Martin}}, booktitle = {{Proceedings of the 13th Canadian Conference on Computational Geometry (CCCG'01)}}, pages = {{181--184}}, title = {{{Turing Computability of (Non-)Linear Optimization}}}, year = {{2001}}, } @inproceedings{18370, abstract = {{We present a new approximate occlusion-culling algorithm that in contrast to other algorithms, manages the objects of the scene in a 3D-sectorgraph. For generating a frame, as far as possible only the visible objects are rendered that can be found quickly by an edge of the graph. The algorithm allows a real-time navigation with over 20 frames per second in complex scenes consisting of over 10 millions of polygons. Moreover, approximation errors are very low.}}, author = {{Klein, Jan and Fischer, Matthias}}, booktitle = {{Proc. of 3. GI-Informatiktage 2001}}, pages = {{275 -- 278}}, title = {{{Occlusion Culling for Virtual Environments based on the 3D-Sectorgraph}}}, year = {{2001}}, } @inproceedings{16492, abstract = {{We present a new output-sensitive rendering algorithm, the randomized z-buffer algorithm. 
It renders an image of an arbitrary three-dimensional scene consisting of triangular primitives by reconstruction from a dynamically chosen set of random surface sample points. This approach is independent of mesh connectivity and topology. The resulting rendering time grows only logarithmically with the numbers of triangles in the scene. We were able to render walkthroughs of scenes of up to 10^14 triangles at interactive frame rates. Automatic identification of low detail scene components ensures that the rendering speed of the randomized z-buffer cannot drop below that of conventional z-buffer rendering. Experimental and analytical evidence is given that the image quality is comparable to that of common approaches like z-buffer rendering. The precomputed data structures employed by the randomized z-buffer allow for interactive dynamic updates of the scene. Their memory requirements grow only linearly with the number of triangles and allow for a scene graph based instantiation scheme to further reduce memory consumption.}}, author = {{Wand, Michael and Fischer, Matthias and Peter, Ingmar and Meyer auf der Heide, Friedhelm and Straßer, Wolfgang}}, booktitle = {{Proceedings of the 28th annual conference on Computer graphics and interactive techniques - SIGGRAPH '01}}, isbn = {{158113374X}}, title = {{{The randomized z-buffer algorithm}}}, doi = {{10.1145/383259.383299}}, year = {{2001}}, } @inbook{16493, author = {{Meyer auf der Heide, Friedhelm}}, booktitle = {{Graph-Theoretic Concepts in Computer Science}}, isbn = {{9783540427070}}, issn = {{0302-9743}}, title = {{{Data Management in Networks}}}, doi = {{10.1007/3-540-45477-2_2}}, volume = {{2204}}, year = {{2001}}, } @inbook{16494, author = {{Meyer auf der Heide, Friedhelm and Wanka, Rolf}}, booktitle = {{Computational Science - ICCS 2001}}, isbn = {{9783540422334}}, issn = {{0302-9743}}, title = {{{Parallel Bridging Models and Their Impact on Algorithm Design}}}, doi = {{10.1007/3-540-45718-6_68}}, year = 
{{2001}}, } @book{16722, editor = {{Meyer auf der Heide, Friedhelm}}, isbn = {{9783540424932}}, issn = {{0302-9743}}, publisher = {{Springer}}, title = {{{Algorithms — ESA 2001, 9th Annual European Symposium Århus, Denmark}}}, doi = {{10.1007/3-540-44676-1}}, year = {{2001}}, } @phdthesis{19620, author = {{Rieping, Ingo}}, isbn = {{3-931466-80-9}}, publisher = {{Verlagsschriftenreihe des Heinz Nixdorf Instituts, Paderborn}}, title = {{{Communication in Parallel Systems-Models, Algorithms and Implementations}}}, volume = {{81}}, year = {{2000}}, } @phdthesis{19621, author = {{Westermann, Matthias}}, isbn = {{3-931466-89-2}}, publisher = {{Verlagsschriftenreihe des Heinz Nixdorf Instituts, Paderborn}}, title = {{{Caching in Networks: Non-Uniform Algorithms and Memory Capacity Constraints}}}, volume = {{90}}, year = {{2000}}, } @techreport{19733, author = {{Bonorden, Olaf and Rieping, Ingo and von Otte, Ingo and Juurlink, Bernhardus}}, title = {{{PUB-Library, Release 7.0, User Guide and Function Reference}}}, year = {{2000}}, } @misc{19784, author = {{Scheideler, Christian}}, isbn = {{3-931466-77-9}}, title = {{{Probabilistic Methods for Coordination Problems}}}, year = {{2000}}, } @inproceedings{19849, author = {{Bednara, M. and Beyer, O. and Teich, J. and Wanka, Rolf}}, booktitle = {{Proc. Int. Conf. on Application Specific Systems, Architectures, and Processors (ASAP)}}, isbn = {{0769507166}}, pages = {{299--308}}, title = {{{Tradeoff analysis and architecture design of a hybrid hardware/software sorter}}}, doi = {{10.1109/asap.2000.862400}}, year = {{2000}}, } @article{2143, author = {{Adler, Micah and Scheideler, Christian}}, journal = {{Theory Comput. Syst.}}, number = {{5/6}}, pages = {{337--391}}, title = {{{Efficient Communication Strategies for Ad Hoc Wireless Networks}}}, doi = {{10.1007/s002240010006}}, volume = {{33}}, year = {{2000}}, } @article{2145, author = {{Scheideler, Christian and Vöcking, Berthold}}, journal = {{SIAM J. 
Comput.}}, number = {{4}}, pages = {{1126----1155}}, title = {{{From Static to Dynamic Routing: Efficient Transformations of Store-and-Forward Protocols}}}, doi = {{10.1137/S0097539799353431}}, volume = {{30}}, year = {{2000}}, } @inproceedings{2146, author = {{Berenbrink, Petra and Brinkmann, André and Scheideler, Christian}}, booktitle = {{PDPTA}}, title = {{{Distributed Path Selection for Storage Networks}}}, year = {{2000}}, } @inproceedings{2147, author = {{Czumaj, Artur and Scheideler, Christian}}, booktitle = {{SODA}}, pages = {{30----39}}, title = {{{Coloring non-uniform hypergraphs: a new algorithmic approach to the general Lovász local lemma}}}, year = {{2000}}, } @article{2148, author = {{Czumaj, Artur and Scheideler, Christian}}, journal = {{Random Struct. Algorithms}}, number = {{3-4}}, pages = {{213----237}}, title = {{{Coloring nonuniform hypergraphs: A new algorithmic approach to the general Lovász local lemma}}}, volume = {{17}}, year = {{2000}}, } @inproceedings{2149, author = {{Brinkmann, André and Salzwedel, Kay and Scheideler, Christian}}, booktitle = {{SPAA}}, pages = {{119----128}}, title = {{{Efficient, distributed data placement strategies for storage area networks (extended abstract)}}}, year = {{2000}}, } @inproceedings{2150, author = {{Czumaj, Artur and Scheideler, Christian}}, booktitle = {{STOC}}, pages = {{38----47}}, publisher = {{ACM}}, title = {{{A new algorithm approach to the general Lovász local lemma with applications to scheduling and satisfiability problems (extended abstract)}}}, year = {{2000}}, } @techreport{17865, abstract = {{We present a new output-sensitive rendering algorithm, the randomized z-buffer algorithm. It renders an image of a three dimensional scene of triangular primitives by reconstruction from a random sample of surface points which are chosen with a probability proportional to the projected area of the objects. The approach is independent of mesh connectivity and topology. 
It leads to a rendering time that grows only logarithmically with the numbers of triangles in the scene and to linear memory consumption, thus allowing walkthroughs of scenes of extreme complexity. We consider different methods for image reconstruction which aim at correctness, rendering speed and image quality and we develop an efficient data structure for sample extraction in output-sensitive time which allows for efficient dynamic updates of the scene. Experiments confirm that scenes consisting of some hundred billion triangles can be rendered within seconds with an image quality comparable to a conventional z-buffer rendering; in special cases, realtime performance can be achieved.}}, author = {{Wand, Michael and Fischer, Matthias and Meyer auf der Heide, Friedhelm}}, title = {{{Randomized Point Sampling for Output-Sensitive Rendering of Complex Dynamic Scenes}}}, year = {{2000}}, } @inproceedings{18962, author = {{Govindarajan, Sathish and Lukovszki, Tamas and Maheshwari, Anil and Zeh, Norbert}}, booktitle = {{Proceedings of the 8th Annual European Symposium on Algorithms (ESA 2000), LNCS}}, issn = {{0178-4617}}, pages = {{585--614}}, title = {{{I/O-Efficient Well-Separated Pair Decomposition and Applications}}}, doi = {{10.1007/s00453-005-1197-3}}, year = {{2000}}, } @inproceedings{17990, abstract = {{We consider the notion of Property Testing as applied to computational geometry. We aim at developing efficient algorithms which determine whether a given (geometrical) object has a predetermined property Q or is 'far' from any object having the property. 
We show that many basic geometric properties have very efficient testing algorithms, whose running time is significantly smaller than the object description size.}}, author = {{Czumaj, Artur and Sohler, Christian and Ziegler, Martin}}, booktitle = {{Proceedings of the 8th Annual European Symposium on Algorithms (ESA'00)}}, isbn = {{9783540410041}}, issn = {{0302-9743}}, pages = {{155--166}}, publisher = {{Springer}}, title = {{{Property Testing in Computational Geometry}}}, doi = {{10.1007/3-540-45253-2_15}}, volume = {{4698}}, year = {{2000}}, } @inproceedings{18146, abstract = {{Since its very beginning, linear algebra is a highly algorithmic subject. Let us just mention the famous Gauss Algorithm which was invented before the theory of algorithms has been developed. The purpose of this paper is to link linear algebra explicitly to computable analysis, that is the theory of computable real number functions. Especially, we will investigate in which sense the dimension of a given linear subspace can be computed. The answer highly depends on how the linear subspace is given: if it is given by a finite number of vectors whose linear span represents the space, then the dimension does not depend continuously on these vectors and consequently it cannot be computed. If the linear subspace is represented via its distance function, which is a standard way to represent closed subspaces in computable analysis, then the dimension does computably depend on the distance function.}}, author = {{Ziegler, Martin and Brattka, Vasco}}, booktitle = {{SOFSEM 2000: Theory and Practice of Informatics}}, isbn = {{9783540413486}}, issn = {{0302-9743}}, pages = {{450--458}}, publisher = {{Springer}}, title = {{{Computing the Dimension of Linear Subspaces}}}, doi = {{10.1007/3-540-44411-4_34}}, volume = {{1963}}, year = {{2000}}, } @inproceedings{18150, abstract = {{What is the minimum number of hyperplanes that slice all edges of the d-dimensional hypercube? 
The answers have been known for d<=4.
This work settles the problem for d=5 and d=6. More precisely, a computer search implies that 4 hyperplanes do not suffice for this purpose (but 5 do).
We also develop computational approaches for attacking this extremal problem from combinatorial geometry in higher dimensions. They allow us to determine for example all maximal sliceable subsets of hypercube edges up to dimension 7.}}, author = {{Ziegler, Martin and Sohler, Christian}}, booktitle = {{Proceedings of the 12th Canadian Conference on Computational Geometry (CCCG'00)}}, pages = {{73--79}}, title = {{{Computing Cut Numbers}}}, year = {{2000}}, } @article{18446, abstract = {{We consider comparator networks M that are used repeatedly: while the output produced by M is not sorted, it is fed again into M. Sorting algorithms working in this way are called periodic. The number of parallel steps performed during a single run of M is called its period, the sorting time of M is the total number of parallel steps that are necessary to sort in the worst case. Periodic sorting networks have the advantage that they need little hardware (control logic, wiring, area) and that they are adaptive. We are interested in comparator networks of a constant period, due to their potential applications in hardware design. Previously, very little was known on such networks. The fastest solutions required time O(nε) where the depth was roughly 1/ε. We introduce a general method called periodification scheme that converts automatically an arbitrary sorting network that sorts n items in time T(n) and that has layout area A(n) into a sorting network that has period 5, sorts ***(n • T(n) items in time O(T(