*) non-deterministic computation;

*) relativized computation, specifically given access to oracles like 0' or 0'';

*) encoding input x and/or output y=f(x) in weaker ways according to the Real Arithmetic Hierarchy.

It turns out that, among these approaches, only the first one provides the required power.}, author = {Ziegler, Martin}, booktitle = {Proc. CiE 2005: New Computational Paradigms}, isbn = {9783540261797}, issn = {0302-9743}, pages = {562--571}, publisher = {Springer}, title = {{Computability and Continuity on the Real Arithmetic Hierarchy and the Power of Type-2 Nondeterminism}}, doi = {10.1007/11494645_68}, volume = {3526}, year = {2005}, } @inproceedings{18449, author = {Loeser, Christoph and Drüke, Isabell and Oesterdiekhoff, Brigitte}, booktitle = {IEEE International Conference on Industrial Informatics (INDIN)}, title = {{ Glaschick, Rainer: Integrative Approach of Web Services and Universal Plug and Play within an AV Scenario}}, year = {2005}, } @article{18855, abstract = {We consider the problem of computing the weight of a Euclidean minimum spanning tree for a set of n points in $\mathbb R^d$. We focus on the setting where the input point set is supported by certain basic (and commonly used) geometric data structures that can provide efficient access to the input in a structured way. We present an algorithm that estimates with high probability the weight of a Euclidean minimum spanning tree of a set of points to within $1 + \eps$ using only $\widetilde{\O}(\sqrt{n} \, \text{poly} (1/\eps))$ queries for constant d. The algorithm assumes that the input is supported by a minimal bounding cube enclosing it, by orthogonal range queries, and by cone approximate nearest neighbor queries. Read More: https://epubs.siam.org/doi/10.1137/S0097539703435297 }, author = {Czumaj, Artur and Ergün, Funda and Fortnow, Lance and Magen, Avner and Newman, Ilan and Rubinfeld, Ronitt and Sohler, Christian}, issn = {0097-5397}, journal = {SIAM Journal on Computing}, number = {1}, pages = {91--109}, title = {{Approximating the Weight of the Euclidean Minimum Spanning Tree in Sublinear Time}}, doi = {10.1137/s0097539703435297}, volume = {35}, year = {2005}, } @inproceedings{18867, abstract = {Modern computer graphics systems are able to render sophisticated 3D szenes consisting of millions of polygons. In this paper we address the problem of occlusion culling. Aila, Miettinen, and Nordlund suggested to implement a FIFO buffer on graphics cards which is able to delay the polygons before drawing them. When one of the polygons within the buffer is occluded or masked by another polygon arriving later from the application, the rendering engine can drop the occluded one without rendering, saving important rendering time.

We introduce a theoretical online model to analyse these problems in theory using competitive analysis. For different cost measures addressed we invent the first competitive algorithms for online occlusion culling. Our implementation shows that these algorithms outperform known ones for real 3D scenes as well.}, author = {Frahling, Gereon and Krokowski, Jens}, booktitle = {Proc. of the 13th Annual European Symposium on Algorithms (ESA 2005)}, isbn = {9783540291183}, issn = {0302-9743}, pages = {758--769}, publisher = {Springer}, title = {{Online Occlusion Culling}}, doi = {10.1007/11561071_67}, volume = {3669}, year = {2005}, } @inproceedings{18925, abstract = {The dynamic page migration problem citedynamic-page-migration is defined in

a distributed network of $n$ mobile nodes sharing one indivisible memory page

of size $D$. During runtime, the nodes can both access a unit of data from

the page and move with a constant speed, thus changing the costs of communication.

The problem is to compute

to minimize the total communication cost.

In this paper we construct and analyze the first deterministic algorithm for this problem.

We prove that it achieves an (up to a constant factor) optimal competitive ratio

$O(n cdot sqrtD)$. We show that the randomization of this algorithm

improves this ratio to $O(sqrtD cdot log n)$ (against an oblivious adversary).

This substantially improves an $O(n cdot sqrtD)$ upper bound from citedynamic-page-migration.

We also give an almost matching lower bound of $Omega(sqrtD cdot sqrtlog n)$ for this problem.}, author = {Bienkowski, Marcin and Dynia, Miroslaw and Korzeniowski, Miroslaw}, booktitle = {Proc. of the 22nd Symposium on Theoretical Aspects of Computer Science (STACS)}, isbn = {9783540249986}, issn = {0302-9743}, pages = {365--376}, title = {{Improved Algorithms for Dynamic Page Migration}}, doi = {10.1007/978-3-540-31856-9_30}, year = {2005}, } @inbook{16468, author = {Bienkowski, Marcin and Korzeniowski, Miroslaw and Meyer auf der Heide, Friedhelm}, booktitle = {Peer-to-Peer Systems IV}, isbn = {9783540290681}, issn = {0302-9743}, title = {{Dynamic Load Balancing in Distributed Hash Tables}}, doi = {10.1007/11558989_20}, year = {2005}, } @proceedings{17113, editor = {Leonardi, Stefano and Meyer auf der Heide, Friedhelm and Wagner, Dorothea}, location = {Schloss Dagstuhl, Germany}, title = {{Abstracts Collection -- Algorithmic Aspects of Large and Complex Networks}}, volume = {05361}, year = {2005}, } @inproceedings{16470, abstract = {We present a web computing library (PUBWCL) in Java that allows to execute strongly coupled, massively parallel algorithms in the bulk-synchronous (BSP) style on PCs distributed over the internet whose owners are willing to donate their unused computation power. PUBWCL is realized as a peer-to-peer system and features migration and restoration of BSP processes executed on it. The use of Java guarantees a high level of security and makes PUBWCL platform independent. In order to estimate the loss of efficiency inherent in such a Java-based system, we have compared it to our C-based PUB-Library. }, author = {Bonorden, Olaf and Gehweiler, Joachim and Meyer auf der Heide, Friedhelm}, booktitle = {Proceeedings of 6th International Conference on Parallel Processing and Applied Mathematics (PPAM)}, isbn = {9783540341413}, issn = {0302-9743}, pages = {801--808}, title = {{A Web Computing Environment for Parallel Algorithms in Java}}, doi = {10.1007/11752578_96}, year = {2005}, } @inbook{16469, author = {Bienkowski, Marcin and Meyer auf der Heide, Friedhelm}, booktitle = {Mathematical Foundations of Computer Science 2005}, isbn = {9783540287025}, issn = {0302-9743}, title = {{Page Migration in Dynamic Networks}}, doi = {10.1007/11549345_1}, year = {2005}, } @article{18282, author = {Ziegler, Martin}, issn = {0020-7748}, journal = {International Journal of Theoretical Physics}, number = {11}, pages = {2059--2071}, title = {{Computational Power of Infinite Quantum Parallelism}}, doi = {10.1007/s10773-005-8984-0}, volume = {44}, year = {2005}, } @article{18763, abstract = {Property testing is a relaxation of classical decision problems which aims at distinguishing between functions having a predetermined property and functions being far from any function having the property. In this paper we present a novel framework for analyzing property testing algorithms. Our framework is based on a connection of property testing and a new class of problems which we call abstract combinatorial programs . We show that if the problem of testing a property can be reduced to an abstract combinatorial program of small dimension , then the property has an efficient tester. We apply our framework to a variety of problems. We present efficient property testing algorithms for geometric clustering problems, for the reversal distance problem, and for graph and hypergraph coloring problems. We also prove that, informally, any hereditary graph property can be efficiently tested if and only if it can be reduced to an abstract combinatorial program of small size. Our framework allows us to analyze all our testers in a unified way, and the obtained complexity bounds either match or improve the previously known bounds. Furthermore, even if the asymptotic complexity of the testers is not improved, the obtained proofs are significantly simpler than the previous ones. We believe that our framework will help to understand the structure of efficiently testable properties.}, author = {Czumaj, Artur and Sohler, Christian}, issn = {0097-5397}, journal = {SIAM Journal on Computing}, number = {3}, pages = {580--615}, title = {{Abstract Combinatorial Programs and Efficient Property Testers}}, doi = {10.1137/s009753970444199x}, volume = {34}, year = {2005}, } @inproceedings{18768, author = {Bădoiu, Mihai and Czumaj, Artur and Indyk, Piotr and Sohler, Christian}, booktitle = {Proc. of the 32nd International Colloquium on Automata, Languages and Programming (ICALP)}, isbn = {9783540275800}, issn = {0302-9743}, pages = {866--877}, title = {{Facility Location in Sublinear Time}}, doi = {10.1007/11523468_70}, year = {2005}, } @inproceedings{18787, abstract = {A dynamic geometric data stream consists of a sequence of m insert/delete operations of points from the discrete space {1,..., ∆} d [26]. We develop streaming (1 + ɛ)-approximation algorithms for k-median, k-means, MaxCut, maximum weighted matching (MaxWM), maximum travelling salesperson (MaxTSP), maximum spanning tree (MaxST), and average distance over dynamic geometric data streams. Our algorithms maintain a small weighted set of points (a coreset) that approximates with probability 2/3 the current point set with respect to the considered problem during the m insert/delete operations of the data stream. They use poly(ɛ −1, log m, log ∆) space and update time per insert/delete operation for constant k and dimension d. Having a coreset one only needs a fast approximation algorithm for the weighted problem to compute a solution quickly. In fact, even an exponential algorithm is sometimes feasible as its running time may still be polynomial in n. For example one can compute in poly(log n, exp(O((1+log(1/ɛ)/ɛ) d−1))) time a solution to k-median and k-means [21] where n is the size of the current point set and k and d are constants. Finding an implicit solution to MaxCut can be done in poly(log n, exp((1/ɛ) O(1))) time. For MaxST and average distance we require poly(log n, ɛ −1) time and for MaxWM we require O(n 3) time to do this.}, author = {Sohler, Christian and Frahling, Gereon}, booktitle = {Proceedings of the 37th ACM Symposium on Theory of Computing (STOC)}, pages = {209--217}, title = {{Coresets in Dynamic Geometric Data Streams}}, year = {2005}, } @inproceedings{18915, abstract = {We present a simple two-person Bucket Game, based on throwing balls into buckets, and we

discuss possible players' strategies.

We use these strategies to create an approximation algorithm for a generalization of

the well known Set Cover problem, where we need to cover each element by at least $k$ sets.

Furthermore, we apply these strategies to construct a randomized algorithm for Dynamic Page Migration

problem achieving the optimal competitive ratio against an oblivious adversary.}, author = {Bienkowski, Marcin and Byrka, Jarosław}, booktitle = {Proc. of the 13th Annual European Symposium on Algorithms (ESA 2005)}, isbn = {9783540291183}, issn = {0302-9743}, pages = {815--826}, publisher = {Springer }, title = {{Bucket Game with Applications to Set Multicover and Dynamic Page Migration}}, doi = {10.1007/11561071_72}, volume = {3669}, year = {2005}, } @inproceedings{16471, abstract = {We compare different load balancing strategies for Bulk-Synchronous Parallel (BSP) programs in a web computing environment. In order to handle the influence of the fluctuating available computation power, we classify the external work load. We evaluate the load balancing algorithms using our web computing library for BSP programs in Java (PUBWCL). Thereby we simulated the external work load in order to have repeatable testing conditions. With the best performing load balancing strategy we could save 39% of the execution time averaged and even up to 50% in particular cases.}, author = {Bonorden, Olaf and Gehweiler, Joachim and Meyer auf der Heide, Friedhelm}, booktitle = {Proceeedings of 6th International Conference on Parallel Processing and Applied Mathematics (PPAM)}, isbn = {9783540341413}, issn = {0302-9743}, pages = {839--846}, title = {{Load Balancing Strategies in a Web Computing Environment}}, doi = {10.1007/11752578_101}, year = {2005}, } @phdthesis{18910, author = {Bienkowski, Marcin}, isbn = {978-3-942647-01-4}, title = {{Page migration in dynamic networks}}, year = {2005}, } @phdthesis{17413, author = {Fischer, Matthias}, title = {{Design, analysis, and evaluation of a data structure for distributed virtual environments}}, year = {2005}, } @article{18790, author = {Czumaj, Artur and Sohler, Christian}, issn = {0304-3975}, journal = {Theoretical Computer Science}, number = {1}, pages = {37--52}, title = {{Testing hypergraph colorability}}, doi = {10.1016/j.tcs.2004.09.031}, volume = {331}, year = {2005}, } @inproceedings{16480, author = {Leonardi, S. and Marchetti-Spaccamela, A. and Meyer auf der Heide, Friedhelm}, booktitle = {SPAA '04: Proceedings of the sixteenth annual ACM symposium on Parallelism in algorithms and architectures}, isbn = {1581138407}, title = {{Scheduling against an adversarial network}}, doi = {10.1145/1007912.1007936}, year = {2004}, } @inproceedings{18448, author = {Oesterdiekhoff, Brigitte}, booktitle = {Proceedings of IFIP Working Conference on Distributed and Parallel Embedded Systems (DIPES'04)}, title = {{Internet Premium Services for Flexible Format Distributed Devices}}, year = {2004}, } @inproceedings{18777, author = {Sohler, Christian and Damerow, Valentina}, booktitle = {Proceedings of the 20th European Workshop on Computational Geometry (EWCG'04)}, pages = {93 -- 96}, title = {{Smoothed Number of Extreme Points under Uniform Noise}}, year = {2004}, } @inproceedings{16474, abstract = {Given n distinct points p1, p2, ... , pn in the plane, the map labeling problem with four squares is to place n axis-parallel equi-sized squares Q1, ... ,Qn of maximum possible size such that pi is a corner of Qi and no two squares overlap. This problem is NP-hard and no algorithm with approximation ratio better than 1/2 exists unless P = NP [10]. In this paper, we consider a scenario where we want to visualize the information gathered by smart dust, i.e. by a large set of simple devices, each consisting of a sensor and a sender that can gather sensor data and send it to a central station. Our task is to label (the positions of) these sensors in a way described by the labeling problem above. Since these devices are not positioned accurately (for example, they might be dropped from an airplane), this gives rise to consider the map labeling problem under the assumption, that the positions of the points are not fixed precisely, but perturbed by random noise. In other words, we consider the smoothed complexity of the map labeling problem. We present an algorithm that, under such an assumption and Gaussian random noise with sufficiently large variance, has linear smoothed complexity.}, author = {Bansal, Vikas and Meyer auf der Heide, Friedhelm and Sohler, Christian}, booktitle = {12th Annual European Symposium on Algorithms (ESA 2004)}, isbn = {9783540230250}, issn = {0302-9743}, title = {{Labeling Smart Dust}}, doi = {10.1007/978-3-540-30140-0_9}, volume = {3221}, year = {2004}, } @inproceedings{18778, abstract = {Given a point set P in the d-dimensional unit hypercube, we give upper bounds on the maximal expected number of extreme points when each point is perturbed by small random noise chosen independently for each point from the same noise distribution Δ. Our results are parametrized by the variance of the noise distribution. For large variance we essentially consider the average case for distribution Δ while for variance 0 we consider the worst case. Hence our results give upper bounds on the number of extreme points where our input distributions range from average case to worst case.

Our main contribution is a rather general lemma that can be used to obtain upper bounds on the expected number of extreme points for a large class of noise distributions. We then apply this lemma to obtain explicit bounds for random noise coming from the Gaussian normal distribution of variance σ² and the uniform distribution in a hypercube of side length &epsilon. For these noise distributions we show upper bounds of O( (1/ σ )^d * log^3/2 * d - 1 n ) and O( ( (n log n) / ε )^d/(d+1) ), respectively. Besides its theoretical motivation our model is also motivated by the observation that in many applications of convex hull algorithms the input data is inherently noisy, e.g. when the data comes from physical measurement or imprecise arithmetic is used.}, author = {Damerow, Valentina and Sohler, Christian}, booktitle = {Proceedings of the 12th European Symposium on Algorithms (ESA'04)}, isbn = {9783540230250}, issn = {0302-9743}, title = {{Extreme Points Under Random Noise}}, doi = {10.1007/978-3-540-30140-0_25}, year = {2004}, } @inproceedings{18785, abstract = {A limiting factor in the performance of a render- ing system is the number of state changes, i.e., changes of the attributes material, texture, shader program, etc., in the stream of rendered primitives. We propose to include a small buffer between appli- cation and graphics hardware in the rendering sys- tem. This pipeline buffer is used to rearrange the incoming sequence of primitives on-line and locally in such a way that the number of state changes is minimized. This method is generic; it can be easily integrated into existing rendering systems. In our experiments a pipeline buffer reduces the number of state changes by an order of magnitude and achieves almost the same rendering time as an optimal, i.e., presorted, sequence without pipeline buffer. Due to its simple structure and its low mem- ory requirements this method can easily be imple- mented in software or even hardware.}, author = {Sohler, Christian and Krokowski, Jens and Räcke, Harald and Westermann, Matthias}, booktitle = {Proceedings of the Vision, Modeling, and Visualization Conference (VMV 2004)}, title = {{Reducing State Changes with a Pipeline Buffer}}, year = {2004}, } @inproceedings{16475, author = {Bienkowski, Marcin and Korzeniowski, Miroslaw and Meyer auf der Heide, Friedhelm}, booktitle = {Proceedings of the sixteenth annual ACM symposium on Parallelism in algorithms and architectures - SPAA '04}, isbn = {1581138407}, title = {{Fighting against two adversaries}}, doi = {10.1145/1007912.1007923}, year = {2004}, } @inproceedings{17346, author = {Brinkmann, André and Heidebuer, Michael and Meyer auf der Heide, Friedhelm and Rückert, Ulrich and Salzwedel, Kay and Vodisek, Mario}, booktitle = {21st {IEEE} Conference on Mass Storage Systems and Technologies / 12th {NASA} Goddard Conference on Mass Storage Systems and Technologies, Greenbelt, Maryland, USA}, editor = {Kobler, Ben and Hariharan, P. C.}, pages = {153----157}, publisher = {IEEE}, title = {{V:Drive - Costs and Benefits of an Out-of-Band Storage Virtualization System}}, year = {2004}, } @inproceedings{18279, abstract = {For $c in REAL$, a $c$-spanner is a subgraph of a complete Euclidean graph satisfying that between any two vertices there exists a path of weighted length at most $c$ times their geometric distance. Based on this property to approximate a complete weighted graph, sparse spanners have found many applications, e.g., in FPTAS, geometric searching, and radio networks. For geometric searching, it turned out to suffice whether the radius rather than the length of some path between any two vertices is bounded relatively to their geometric distance; this is the defining property of weak spanners. Finally regarding radio network applications, a power spanner accounts for the total energy afforded for a wireless transmission with the requirement that the sum of the squares of the lengths of some path between any two planar vertices must be bounded relatively to the square of their geometric distance (or higher powers up to 6 or even 8).

While it is known that any $c$-spanner is also both a weak $C_1$-spanner and a $C_2$-power spanner (for appropriate $C_1,C_2$ depending only on $c$ but not on the graph under consideration), we show that the converse fails: There exists a family of $c_1$-power spanners that are no weak $C$-spanners and also a family of weak $c_2$-spanners that are no $C$-spanners for any fixed $C$ (and thus no uniform spanners, either). However the deepest result of the present work reveals that, surprisingly, any weak spanner is also a uniform power spanner. We further generalize the latter notion by considering $(c,delta)$-power spanners where the sum of the $delta$-th powers of the lengths has to be bounded; so $(cdot,2)$-power spanners coincide with the usual power spanners and $(cdot,1)$-power spanners are classical spanners. Interestingly, these $(cdot,delta)$-power spanners form a strict hierarchy where the above results still hold for any $deltageq2$; some even hold for $delta>1$ while counterexamples reveal others to fail for $delta<2$. In fact we show that in general every self-similar curve of fractal dimension $d>delta$ is no $(C,delta)$-power spanner for any fixed $C$. }, author = {Schindelhauer, Christian and Volbert, Klaus and Ziegler, Martin}, booktitle = {Proc. of 15th Annual International Symposium on Algorithms and Computation (ISAAC'04)}, isbn = {9783540241317}, issn = {0302-9743}, pages = {805--821}, publisher = {Springer }, title = {{Spanners, Weak Spanners, and Power Spanners for Wireless Networks}}, doi = {10.1007/978-3-540-30551-4_69}, volume = {3341}, year = {2004}, } @inproceedings{18786, author = {Sohler, Christian and Czumaj, Artur}, booktitle = {Automata, Languages and Programming (ICALP)}, number = {1}, pages = {396--407}, title = {{Sublinear-Time Approximation for Clustering via Random Sampling}}, year = {2004}, } @inproceedings{18263, abstract = {We generalize univariate multipoint evaluation of polynomials of degree n at sublinear amortized cost per point. More precisely, it is shown how to evaluate a bivariate polynomial p of maximum degree less than n, specified by its n^2 coefficients, simultaneously at n^2 given points using a total of O(n^2.667) arithmetic operations. In terms of the input size N being quadratic in n, this amounts to an amortized cost of O(N^0.334) per point.}, author = {Nüsken, Michael and Ziegler, Martin}, booktitle = {Proc. 12th Annual Symposium on Algorithms (ESA'04)}, isbn = {9783540230250}, issn = {0302-9743}, pages = {544--555}, publisher = {Springer}, title = {{Fast Multipoint Evaluation of Bivariate Polynomials}}, doi = {10.1007/978-3-540-30140-0_49}, volume = {3221}, year = {2004}, } @article{17986, author = {Ziegler, Martin and Brattka, Vasco}, journal = {Theoretical Computer Science}, number = {1-3}, pages = {187--211}, title = {{Computability in linear algebra}}, doi = {https://doi.org/10.1016/j.tcs.2004.06.022}, volume = {326}, year = {2004}, } @article{16399, abstract = {We present a new data structure for rendering highly complex virtual environments of arbitrary topology. The special feature of our approach is that it allows an interactive navigation in very large scenes (30 GB/400 million polygons in our benchmark scenes) that cannot be stored in main memory, but only on a local or remote hard disk. Furthermore, it allows interactive rendering of substantially more complex scenes by instantiating objects. The sampling process is done in the preprocessing. There, the polygons are randomly distributed in our hierarchical data structure, the randomized sample tree. This tree only uses space that is linear in the number of polygons. In order to produce an approximate image of the scene, the tree is traversed and polygons stored in the visited nodes are rendered. During the interactive walkthrough, parts of the sample tree are loaded from local or remote hard disk. We implemented our algorithm in a prototypical walkthrough system. Analysis and experiments show that the quality of our images is comparable to images computed by the conventional z-buffer algorithm regardless of the scene topology.}, author = {Klein, Jan and Krokowski, Jens and Fischer, Matthias and Wand, Michael and Wanka, Rolf and Meyer auf der Heide, Friedhelm}, issn = {1054-7460}, journal = {Presence: Teleoperators and Virtual Environments}, pages = {617--637}, title = {{The Randomized Sample Tree: A Data Structure for Interactive Walk-Throughs in Externally Stored Virtual Environments}}, doi = {10.1162/1054746043280619}, year = {2004}, } @inproceedings{18364, abstract = {The visualisation of manufacturing-processes assists the user in understanding and analysis. Typically he can move free and unguided in a virtual environment which visualizes the entire process. Thus knowledge and conclusions are to some extend acquired on a random base. This article describes the development of a tool, which enables the user to interactively improve significant production processes in the simulation. He moves in a virtual 3D-environment (walkthrough system) and is able to acquire automatically calculated indications for significant processes. At the same time the simulation considers significant objects in a more detailed way. If the viewer is interested in a significant process, he is automatically guided to the relevant location where he can examine the critical situation by modification of the simulation model.}, author = {Mueck, Bengt and Dangelmaier, Wilhelm and Laroque, Christoph and Fischer, Matthias and Kortenjan, Michael}, booktitle = {Simulation and Visualisation 2004}, pages = {73--83}, publisher = {SCS European Publishing House}, title = {{Guidance of Users in Interactive 3D-Visualisations of Material Flow Simulations}}, year = {2004}, } @article{16477, author = {Meyer auf der Heide, Friedhelm and Schindelhauer, Christian and Volbert, Klaus and Grünewald, Matthias}, issn = {1432-4350}, journal = {Theory of Computing Systems}, pages = {343--370}, title = {{Congestion, Dilation, and Energy in Radio Networks}}, doi = {10.1007/s00224-004-1124-z}, year = {2004}, } @article{18447, author = {Oesterdiekhoff, Brigitte}, journal = {Informatik Spektrum}, number = {5}, pages = {448--452}, title = {{Transcoding von Webinhalten}}, volume = {27}, year = {2004}, } @inbook{18258, abstract = {Multi-evaluation of the Coulomb potential induced by N particles is a central part of N-body simulations. In 3D, known subquadratic time algorithms return approximations up to given ABSOLUTE precision. By combining data structures from Computational Geometry with fast polynomial arithmetic, the present work obtains approximations of prescribable RELATIVE error e>0 in time O(1/e*N*polylog N).}, author = {Ziegler, Martin}, booktitle = {Lecture Notes in Computer Science}, editor = {Dehne, F. and Sack, JR. and Smid, M.}, isbn = {9783540405450}, issn = {0302-9743}, publisher = {Springer}, title = {{Fast Relative Approximation of Potential Fields}}, doi = {10.1007/978-3-540-45078-8_13}, volume = {2748}, year = {2003}, } @inproceedings{18791, abstract = {We consider the problem of finding the weight of a Euclidean minimum spanning tree for a set of n points in ℝd. We focus on the situation when the input point set is supported by certain basic (and commonly used) geometric data structures that can provide efficient access to the input in a structured way. We present an algorithm that estimates with high probability the weight of a Euclidean minimum spanning tree of a set of points to within 1 + ε using only \~{O}(√ poly(1/ε)) queries for constant d. The algorithm assumes that the input is supported by a minimal bounding cube enclosing it, by orthogonal range queries, and by cone approximate nearest neighbors queries.}, author = {Magen, Avner and Ergun, Funda and Sohler, Christian and Rubinfeld, Ronitt and Czumaj, Artur and Newman, Ilan and Fortnow, Lance}, booktitle = {Proceedings of the 14th ACM-SIAM Symposium on Discrete Algorithms (SODA 2003)}, isbn = {0898715385}, pages = {813–822}, title = {{Sublinear Approximation of Euclidean Minimum Spanning Tree}}, year = {2003}, } @inproceedings{18260, abstract = {For uniform computability of regular sets in Euclidean space, previous work has identified twelve 'basic' notions, to (pairs of) which many previous notions considered in literature were shown to be equivalent.

With respect to those basic notions, we now investigate on the computability of natural OPERATIONS on regular sets: union, intersection, complement, convex hull, image, and pre-image under suitable classes of functions.}, author = {Ziegler, Martin}, booktitle = {Computability and Complexity in Analysis}, issn = {0942-5616}, number = {302-8/2003}, pages = {389--406}, title = {{Computable operators on regular sets}}, doi = {10.1002/malq.200310107}, year = {2003}, } @article{16481, abstract = {

Gegenüber anderen bekannten Web-Computing Projekten wie z.B. SETI@home oder distributed.net zeichnet sich dieses System dadurch aus, dass nicht Clients von einem zentralen Server "häppchenweise" unabhängige Teilprobleme anfordern und lösen, sondern dass die Clients gemeinsam an einem Problem arbeiten, indem sie nach dem BSP-Modell miteinander kommunizieren und sich synchronisieren.}, author = {Gehweiler, Joachim}, title = {{Entwurf und Implementierung einer Laufzeitumgebung für parallele Algorithmen in Java}}, year = {2003}, } @article{16482, author = {Juurlink, Ben and Kolman, Petr and Meyer auf der Heide, Friedhelm and Rieping, Ingo}, issn = {1570-8667}, journal = {Journal of Discrete Algorithms}, pages = {151--166}, title = {{Optimal broadcast on parallel locality models}}, doi = {10.1016/s1570-8667(03)00023-6}, year = {2003}, } @inproceedings{17423, author = {Mueck, Bengt and Dangelmaier, Wilhelm and Fischer, Matthias}, booktitle = {15th European Simulation Symposium (ESS 2003)}, pages = {367--371}, publisher = {SCS - Europe}, title = {{Components for the Active Support of the Analysis of Material Flow Simulations in a Virtual Environment}}, year = {2003}, } @inproceedings{18907, abstract = {In a (randomized) oblivious routing scheme the path chosen for a request

between a source $s$ and a target $t$ is independent from the current traffic

in the network. Hence, such a scheme consists of probability distributions

over $s-t$ paths for every source-target pair $s,t$ in the network.

In a recent result citeR02 it was shown that for any undirected network

there is an oblivious routing scheme that achieves a polylogarithmic

competitive ratio with respect to congestion. Subsequently, Azar et

al. citeACF+03 gave a polynomial time algorithm that for a given network

constructs the best oblivious routing scheme, i.e. the scheme that guarantees

the best possible competitive ratio.

Unfortunately, the latter result is based on the Ellipsoid algorithm; hence

it is unpractical for large networks.

In this paper we present a combinatorial algorithm for constructing an

oblivious routing scheme that guarantees a competitive ratio of $O(log^4n)$

for undirected networks. Furthermore, our approach yields a proof

for the existence of an oblivious routing scheme with competitive ratio

$O(log^3n)$, which is much simpler than the original proof from citeR02.}, author = {Bienkowski, Marcin and Korzeniowski, Miroslaw and Räcke, Harald}, booktitle = {Proceedings of the fifteenth annual ACM symposium on Parallel algorithms and architectures - SPAA '03}, isbn = {1581136617}, title = {{A practical algorithm for constructing oblivious routing schemes}}, doi = {10.1145/777412.777418}, year = {2003}, } @inproceedings{18960, abstract = {We investigate distributed algorithms for mobile ad hoc networks for moving radio stations with adjustable transmission power in a worst case scenario. We consider two models to find a reasonable restriction on the worst-case mobility. In the pedestrian model we assume a maximum speed $v_max$ of the radio stations, while in the vehicular model we assume a maximum acceleration $a_max$ of the points. Our goal is to maintain persistent routes with nice communication network properties like hop-distance, energy-consumption, congestion and number of interferences. A route is persistent, if we can guarantee that all edges of this route can be uphold for a given time span $Delta$, which is a parameter denoting the minimum time the mobile network needs to adopt changes, i.e. update routing tables, change directory entries, etc. This $Delta$ can be used as the length of an update interval for a proactive routing scheme. We extend some known notions such as transmission range, interferences, spanner, power spanner and congestion to both mobility models and introduce a new parameter called crowdedness that states a lower bound on the number of radio interferences. Then we prove that a mobile spanner hosts a path system that polylogarithmically approximates the optimal congestion. We present distributed algorithms based on a grid clustering technique and a high-dimensional representation of the dynamical start situation which construct mobile spanners with low congestion, low interference number, low energy-consumption, and low degree. We measure the optimality of the output of our algorithm by comparing it with the optimal choice of persistent routes under the same circumstances with respect to pedestrian or vehicular worst-case movements. Finally, we present solutions for dynamic position information management under our mobility models.}, author = {Schindelhauer, Christian and Lukovszki, Tamás and Rührup, Stefan and Volbert, Klaus}, booktitle = {Proc. of the 15th ACM Symposium on Parallel Algorithms and Architectures (SPAA03)}, isbn = {1581136617}, title = {{Worst case mobility in ad hoc networks}}, doi = {10.1145/777412.777448}, year = {2003}, } @phdthesis{18573, author = {Sohler, Christian}, isbn = {3-935433-28-X}, title = {{Property Testing and Geometry}}, year = {2003}, } @proceedings{16484, editor = {Rosenberg, Arnold L. and Meyer auf der Heide, Friedhelm}, isbn = {1581136617}, title = {{Proceedings of the fifteenth annual ACM symposium on Parallel algorithms and architectures - SPAA '03}}, doi = {10.1145/777412}, year = {2003}, } @inproceedings{16720, author = {Bonorden, Olaf and Bruls, N. and Kastens, U. and Le, D. K. and Meyer auf der Heide, Friedhelm and Niemann, J.-C. and Porrmann, M. and Rückert, U. and Slowik, A. and Thies, M.}, booktitle = {28th Annual IEEE International Conference on Local Computer Networks}, title = {{A holistic methodology for network processor design}}, doi = {10.1109/LCN.2003.1243185}, year = {2003}, } @inproceedings{18372, abstract = {Simulation und Visualisierung sind anerkannte Mittel zum Verstehen und Analysieren von Fertigungsprozessen. In Visualisierungen von Fertigungsprozessen können Betrachter frei und ungeleitet umherwandern. Erkenntnisse werden so aber eher zufällig erworben. Dieser Artikel skizziert ein System und Methoden, die den Betrachter unterstützen auf auffällige/signifikante Prozesse/ Punkte in Materialflusssimulationen aufmerksam zu werden und diese zu entschärfen. Es wird der Entwurf eines Werkzeugs beschrieben, dass den Betrachter einer Simulation die Möglichkeit bietet, signifikante Produktionsprozesse interaktiv zu verbessern. Der Benutzer wird sich in einer virtuellen 3D-Umgebung (Walkthrough-System) bewegen können und automatisch ermittelte Indizien für signifikante Abläufe erhalten. Zugleich soll die Simulation signifikante Objekte genauer simulieren. Bekundet der Benutzer Interesse an einem signifikanten Prozess, wird er automatisch zu dem jeweiligen Ort geführt werden und dort durch Eingriffe in die Simulation die kritische Situation experimentell untersuchen können. Da der kritische Moment in der Vergangenheit liegt und somit vom Betrachter schon verpasst ist, wird es dem Betrachter möglich sein, die Simulation auf einen Zeitpunkt vor dem Eintreten zurück zu setzen. Die virtuelle Szene (3D-Grafik-Modelle) einer typischen dynamischen Simulationsumgebung ist in der Regel zu komplex, um sie in Echtzeit in einem Walkthrough-System zu visualisieren und darzustellen. Typischerweise werden Approximationsverfahren eingesetzt, um die Komplexität zu reduzieren und ein flüssiges Navigieren des Betrachters zu erlauben. Durch spezifische Simulations-Anforderungen ist bekannt, an welchen Objekten des Simulationsmodells Probleme auftreten; sie sind für den Betrachter wichtig. Die zugehörigen virtuellen 3D-Repräsentanten, können von den Approximationsalgorithmen mit einer besonders hohen Darstellungsqualität dargestellt werden und die übrigen Teile der virtuellen Szene entsprechend vernachlässigt werden. Solche Approximationsalgorithmen und Datenstrukturen nutzen die spezifischen Eigenschaften virtueller Simulationsumgebungen aus, um eine hohe Darstellungsqualität und Darstellungsperformance zu erreichen. }, author = {Dangelmaier, Wilhelm and Franke, Werner and Mueck, Bengt and Fischer, Matthias}, booktitle = {2. Paderborner Workshop Augmented & Virtual Reality in der Produktentstehung}, pages = {141--151}, title = {{Komponenten zur aktiven Unterstützung der Analyse von Materialflusssimulationen in virtuellen Umgebungen}}, volume = {123}, year = {2003}, } @article{18567, author = {Adler, Micah and Vöcking, Berthold and Sohler, Christian and Räcke, Harald and Sivadasan, Naveen}, journal = {Combinatorics, Probability & Computing}, pages = {225--244}, title = {{Randomized Pursuit-Evasion in Graphs}}, year = {2003}, } @inproceedings{18947, abstract = {In this paper, we define a Petri net model for the network or routing layer of a mobile ad hoc network. Such networks require routing strategies substantially different from those used in static communication networks. The model pre- sented consists of two layers, a location service and a po- sition based routing. Both are described in detail. Our ap- proach considers a very strong definition of fault tolerance thereby improving state-of-the-art ad hoc routing protocols in several respects. Modeling of the communication archi- tecture for mobile ad hoc networks is part of our overall effort towards a design methodology for distributed embed- ded real-time systems including dynamically evolving com- ponents.}, author = {Rust, Carsten and Stappert, Friedhelm and Lukovszki, Tamás}, booktitle = {7th World Multiconference on Systemics, Cybernetics and Informatics}, title = {{A Petri Net Model for the Network Layer of a Mobile Ad Hoc Network Architecture}}, year = {2003}, } @inproceedings{18966, abstract = {A recent seminal result of Räcke is that for any undirected network there is an oblivious routing algorithm with a polylogarithmic competitive ratio with respect to congestion. Unfortunately, Räcke's construction is not polynomial time. We give a polynomial time construction that guarantees Räcke's bounds, and more generally gives the true optimal ratio for any (undirected or directed) network.}, author = {Azar, Yossi and Cohen, Edith and Fiat, Amos and Kaplan, Haim and Racke, Harald}, booktitle = {Proceedings of the thirty-fifth ACM symposium on Theory of computing - STOC '03}, isbn = {1581136749}, title = {{Optimal oblivious routing in polynomial time}}, doi = {10.1145/780542.780599}, year = {2003}, } @article{18176, author = {Ziegler, Martin}, issn = {0942-5616}, journal = {Mathematical Logic Quarterly (MLQ)}, number = {S1}, pages = {157--181}, title = {{Computability on Regular Subsets of Euclidean Space}}, doi = {10.1002/1521-3870(200210)48:1+<157::aid-malq157>3.0.co;2-4}, volume = {48}, year = {2002}, } @phdthesis{18169, abstract = {Die Implementierung von Algorithmen zur Lösung geometrischer Probleme im Euklidischen Raum (z.B. Berechnung der konvexen Hülle oder des Durchschnitts zweier Polyeder) stellt sich oftmals als hochgradig nichttrivial heraus. Ob und unter welchen Voraussetzungen die verursachenden numerischen Instabilitäten überhaupt ini den Griff zu kriegen oder vielmehr dem Problem inhärent sind, untersucht diese Arbeit in einem auf Turing zurückgehenden Rechenmodell. Im Gegensatz zu algebraischen Ansätzen geht jenes nicht von der Verfügbarkeit exakter Tests auf z.B. Gleichheit reeller Zahlen aus, sondern berücksichtigt die auf Digitalcomputern tatsächlich realisierbare Approximation durch rationale Zahlen. In diesem Rahmen werden beweisbar stabile Algorithmen zum Lösen linearer Gleichungssysteme, zur Matrix-Diagonalisierung und zur linearen wie nichtlinearen Optimierung präsentiert. Als wichtiges technisches Hilfsmittel dient ein neuer Berechenbarkeitsbegriff für reguläre unendliche Mengen reller Zahlen, der sich aus dem systematischen Vergleich verschiedener der Literatur entnommener ad-hoc Ansätze ergibt.}, author = {Ziegler, Martin}, title = {{Zur Berechenbarkeit reeller geometrischer Probleme}}, year = {2002}, } @inproceedings{18177, abstract = {Consider the classical point location problem: for a fixed arrangement of m hyperplanes and its induced partition of d-space report, upon input of some point, which face it lies in. With sufficient memory, this is easy to solve in logarithmic time O(log m). But how fast can algorithms (formalized as Linear Decision Trees) of *minimum* size be? The present work gives lower and upper bounds for the time complexity of point location under this constraint. They show that, in addition to m, the maximum number w of walls of a cell turns out to be a crucial parameter. We also consider a relaxation of the strict minimum-size condition allowing for constant factor overhead.}, author = {Ziegler, Martin and Damerow, Valentina and Finschi, Lukas}, booktitle = {Proceedings of the 14th Canadian Conference on Computational Geometry (CCCG'02)}, title = {{Point Location Algorithms of Minimum Size}}, year = {2002}, } @inbook{16723, author = {Meyer auf der Heide, Friedhelm and Kumar, Mohan and Nikoletseas, Sotiris and Spirakis, Paul}, booktitle = {Euro-Par 2002 Parallel Processing}, isbn = {9783540440499}, issn = {0302-9743}, title = {{Mobile Computing, Mobile Networks}}, doi = {10.1007/3-540-45706-2_133}, year = {2002}, } @inproceedings{18179, abstract = {Do the solutions of linear equations depend computably on their coefficients? Implicitly, this has been one of the central questions in linear algebra since the very beginning of the subject and the famous Gauß algorithm is one of its numerical answers. Today there exists a tremendous number of algorithms which solve this problem for different types of linear equations. However, actual implementations in floating point arithmetic keep exhibiting numerical instabilities for ill-conditioned inputs. This situation raises the question which of these instabilities are intrinsic, thus caused by the very nature of the problem, and which are just side effects of specific algorithms. To approach this principle question we revisit linear equations from the rigorous point of view of computability. Therefore we apply methods of computable analysis, which is the Turing machine based theory of computable real number functions. It turns out that, given the coefficients of a system of linear equations, we can compute the space of solutions, if and only if the dimension of the solution space is known in advance. Especially, this explains why there cannot exist any stable algorithms under weaker assumptions.}, author = {Brattka, Vasco and Ziegler, Martin}, booktitle = {Proceedings of the 2nd IFIP International Conference on Theoretical Computer Science}, pages = {95--106}, title = {{Computability of Linear Equations}}, doi = {10.1007/978-0-387-35608-2_9}, year = {2002}, } @inproceedings{16490, abstract = {We present a new data structure for rendering highly complex virtual environments of arbitrary topology. The special feature of our approach is that it allows an interactive navigation in very large scenes (30 GB/400 million polygons in our benchmark scenes) that cannot be stored in main memory, but only on a local or remote hard disk. Furthermore, it allows interactive rendering of substantially more complex scenes by instantiating objects. For the computation of an approximate image of the scene, a sampling technique is used. In the preprocessing, a so-called sample tree is built whose nodes contain randomly selected polygons from the scene. This tree only uses space that is linear in the number of polygons. In order to produce an image of the scene, the tree is traversed and polygons stored in the visited nodes are rendered. During the interactive walkthrough, parts of the sample tree are loaded from local or remote hard disk. We implemented our algorithm in a prototypical walkthrough system. Analysis and experiments show that the quality of our images is comparable to images computed by the conventional z-buffer algorithm regardless of the scene topology.}, author = {Klein, Jan and Krokowski, Jens and Fischer, Matthias and Wand, Michael and Wanka, Rolf and Meyer auf der Heide, Friedhelm}, booktitle = {Proceedings of the ACM symposium on Virtual reality software and technology - VRST '02}, isbn = {1581135300}, title = {{The randomized sample tree: a data structure for interactive walkthroughs in externally stored virtual environments}}, doi = {10.1145/585740.585764}, year = {2002}, } @inproceedings{18369, abstract = {Visualising is a method used to help experiencing and understanding causal cohesions in simulation processes. For this purpose, tools for visualising are already implemented in prevalent simulation systems. The user creates his simulation model and generates a 3-dimensional (2,5-dimensional) visualising by means of the simulation system. This helps examining the process which makes it easier for the viewer to understand it. Simulation tools usually only provide the opportunity for a unidirectional visualising. In a 3-dimensional surrounding the viewer can not implement an interaction with the simulation while the system is running. Though an interaction during the simulation run enables the user to gain a better understanding of causal cohesions. Solutions via HLA are sophisticated and therefore rather suited for extensive projects. We present a distributed system consisting of a commercial manufacturing simulation tool, a coupling module and a walkthrough system. The distributed system in conjunctions with the coupling module guarantees generality and a wide field of applications of the walkthrough system. Further it guarantees flexibility and selection of the specialized graphics hardware for the walkthrough system. A further contribution of this paper is the solution of the time synchronisation problem caused by simulation tool and walkthrough system. }, author = {Mueck, Bengt and Dangelmaier, Wilhelm and Fischer, Matthias and Klemisch, Wolfram}, booktitle = {Simulation und Visualisierung}, pages = {71--84}, publisher = {SCS European Publishing House}, title = {{Bi-directional Coupling of Simulation Tools with a Walkthrough-System}}, year = {2002}, } @inproceedings{18566, abstract = {We analyze a randomized pursuit-evasion game on graphs. This game is played by two players, a hunter and a rabbit. Let G be any connected, undirected graph with n nodes. The game is played in rounds and in each round both the hunter and the rabbit are located at a node of the graph. Between rounds both the hunter and the rabbit can stay at the current node or move to another node. The hunter is assumed to be restricted to the graph G: in every round, the hunter can move using at most one edge. For the rabbit we investigate two models: in one model the rabbit is restricted to the same graph as the hunter, and in the other model the rabbit is unrestricted, i.e., it can jump to an arbitrary node in every round. We say that the rabbit is caught as soon as hunter and rabbit are located at the same node in a round. The goal of the hunter is to catch the rabbit in as few rounds as possible, whereas the rabbit aims to maximize the number of rounds until it is caught. Given a randomized hunter strategy for G, the escape length for that strategy is the worst case expected number of rounds it takes the hunter to catch the rabbit, where the worst case is with regards to all (possibly randomized) rabbit strategies. Our main result is a hunter strategy for general graphs with an escape length of only O (n log (diam(G))) against restricted as well as unrestricted rabbits. This bound is close to optimal since Ω(n) is a trivial lower bound on the escape length in both models. Furthermore, we prove that our upper bound is optimal up to constant factors against unrestricted rabbits.}, author = {Adler, Micah and Räcke, Harald and Sivadasan, Naveen and Sohler, Christian and Vöcking, Berthold}, booktitle = {Proceedings of the 29th International Colloquium on Automata, Languages and Programming}, isbn = {9783540438649}, issn = {0302-9743}, title = {{Randomized Pursuit-Evasion in Graphs}}, doi = {10.1007/3-540-45465-9_77}, year = {2002}, } @article{16489, author = {Krick, C. and Meyer auf der Heide, Friedhelm and Räcke, H. and Vöcking, B. and Westermann, M.}, issn = {1432-4350}, journal = {Theory of Computing Systems}, pages = {217--245}, title = {{Data Management in Networks: Experimental Evaluation of a Provably Good Strategy}}, doi = {10.1007/s00224-001-1045-z}, year = {2002}, } @inproceedings{16491, author = {Meyer auf der Heide, Friedhelm and Schindelhauer, Christian and Volbert, Klaus and Grünewald, Matthias}, booktitle = {Proceedings of the fourteenth annual ACM symposium on Parallel algorithms and architectures - SPAA '02}, isbn = {1581135297}, title = {{Energy, congestion and dilation in radio networks}}, doi = {10.1145/564870.564910}, year = {2002}, } @article{18853, author = {Sohler, Christian and Czumaj, Artur}, journal = {Proceedings of the 43th Symposium on Foundations of Computer Science (FOCS)}, pages = {83--92}, title = {{Abstract Combinatorial Programs and Efficient Property Testers}}, year = {2002}, } @techreport{18961, author = {Lukovszki, Tamás and Benczúr, A.}, title = {{A Degree O(log log n) Fault Tolerant Distributed Location Service for Geographic Ad-Hoc Routing}}, year = {2002}, } @inproceedings{18152, abstract = {Computing the spectral decomposition of a normal matrix is among the most frequent tasks to numerical mathematics. A vast range of methods are employed to do so, but all of them suffer from instabilities when applied to degenerate matrices, i.e., those having multiple eigenvalues. We investigate the spectral representation's effectivity properties on the sound formal basis of computable analysis. It turns out that in general the eigenvectors cannot be computed from a given matrix. If however the size of the matrix' spectrum (=number of different eigenvalues) is known in advance, it can be diagonalized effectively. Thus, in principle the spectral decomposition can be computed under remarkably weak non-degeneracy conditions.}, author = {Ziegler, Martin and Brattka, Vasco}, booktitle = {Proceedings of the 4th Workshop on Computability and Complexity in Analysis (CCA'2000)}, isbn = {9783540421979}, issn = {0302-9743}, pages = {378--388}, title = {{A Computable Spectral Theorem}}, doi = {10.1007/3-540-45335-0_23}, volume = {2064}, year = {2001}, } @inproceedings{16492, abstract = {We present a new output-sensitive rendering algorithm, the randomized z-buffer algorithm. It renders an image of an arbitrary three-dimensional scene consisting of triangular primitives by reconstruction from a dynamically chosen set of random surface sample points. This approach is independent of mesh connectivity and topology. The resulting rendering time grows only logarithmically with the numbers of triangles in the scene. We were able to render walkthroughs of scenes of up to 10^14 triangles at interactive frame rates. Automatic identification of low detail scene components ensures that the rendering speed of the randomized z-buffer cannot drop below that of conventional z-buffer rendering. Experimental and analytical evidence is given that the image quality is comparable to that of common approaches like z-buffer rendering. The precomputed data structures employed by the randomized z-buffer allow for interactive dynamic updates of the scene. Their memory requirements grow only linearly with the number of triangles and allow for a scene graph based instantiation scheme to further reduce memory consumption.}, author = {Wand, Michael and Fischer, Matthias and Peter, Ingmar and Meyer auf der Heide, Friedhelm and Straßer, Wolfgang}, booktitle = {Proceedings of the 28th annual conference on Computer graphics and interactive techniques - SIGGRAPH '01}, isbn = {158113374X}, title = {{The randomized z-buffer algorithm}}, doi = {10.1145/383259.383299}, year = {2001}, } @book{16722, editor = {Meyer auf der Heide, Friedhelm}, isbn = {9783540424932}, issn = {0302-9743}, publisher = {Springer }, title = {{Algorithms — ESA 2001, 9th Annual European Symposium Århus, Denmark}}, doi = {10.1007/3-540-44676-1}, year = {2001}, } @inbook{16493, author = {Meyer auf der Heide, Friedhelm}, booktitle = {Graph-Theoretic Concepts in Computer Science}, isbn = {9783540427070}, issn = {0302-9743}, title = {{Data Management in Networks}}, doi = {10.1007/3-540-45477-2_2}, volume = {2204}, year = {2001}, } @inbook{16494, author = {Meyer auf der Heide, Friedhelm and Wanka, Rolf}, booktitle = {Computational Science - ICCS 2001}, isbn = {9783540422334}, issn = {0302-9743}, title = {{Parallel Bridging Models and Their Impact on Algorithm Design}}, doi = {10.1007/3-540-45718-6_68}, year = {2001}, } @inproceedings{18166, abstract = {What is the maximum number of edges of the d-dimensional hypercube, denoted by S(d,k), that can be sliced by k many hyperplanes? This question on combinatorial properties of Euclidean geometry arising from linear separability considerations in the theory of Perceptrons has become an issue on its own. We use computational and combinatorial methods to obtain new bounds on S(d,k), s<=8. These strengthen earlier results on hypercube cut numbers.}, author = {Ziegler, Martin and Emamy-Khansari, M. Reza}, booktitle = {Proceedings of the First International Conference on Discrete Models - Combinatorics, Computation and Geometry (DM-CCG'2001)}, pages = {155--164}, title = {{New Bounds for Hypercube Slicing Numbers}}, volume = {AA}, year = {2001}, } @inproceedings{18370, abstract = {We present a new approximate occlusion-culling algorithm that in contrast to other algorithms, manages the objects of the scene in a 3D-sectorgraph. For generating a frame, as far as possible only the visible objects are rendered that can be found quickly by an edge of the graph. The algorithm allows a real-time navigation with over 20 frames per second in complex scenes consisting of over 10 millions of polygons. Moreover, approximation errors are very low.}, author = {Klein, Jan and Fischer, Matthias}, booktitle = {Proc. of 3. GI-Informatiktage 2001}, pages = {275 -- 278}, title = {{Occlusion Culling for Virtual Environments based on the 3D-Sectorgraph}}, year = {2001}, } @inproceedings{18750, author = {Sohler, Christian and Czumaj, Artur}, booktitle = {Proceedings of the 12th ACM-SIAM Symposium on Discrete Algorithms}, pages = {865--872}, title = {{Soft Kinetic Data Structures}}, year = {2001}, } @inproceedings{18964, author = {Lukovszki, Tamás and Maheshwari, Anil and Zeh, Norbert}, booktitle = {Proceedings of the 21st Annual Conference on Foundations of Software Technology and Theoretical Computer Science (FSTTCS 2001), LNCS}, isbn = {9783540430025}, issn = {0302-9743}, title = {{I/O-Efficient Batched Range Counting and Its Applications to Proximity Problems}}, doi = {10.1007/3-540-45294-x_21}, year = {2001}, } @article{18749, author = {Czumaj, Artur and Sohler, Christian}, isbn = {9783540422877}, issn = {0302-9743}, journal = {Proceedings of the 28th International Colloquium on Automata, Languages and Programming (ICALP)}, pages = {493--505}, title = {{Testing Hypergraph Coloring}}, doi = {10.1007/3-540-48224-5_41}, year = {2001}, } @article{18857, abstract = {This paper investigates geometric problems in the context of property testing algorithms. Property testing is an emerging area in computer science in which one is aiming at verifying whether a given object has a predetermined property or is “far” from any object having the property. Although there has been some research previously done in testing geometric properties, prior works have been mostly dealing with the study of combinatorial notion of the distance defining whether an object is “far” or it is “close”; very little research has been done for geometric notion of distance measures, that is, distance measures that are based on the geometry underlying input objects. The main objective of this work is to develop sound models to study geometric problems in the context of property testing. Comparing to the previous work in property testing, there are two novel aspects developed in this paper: geometric measures of being close to an object having the predetermined property, and the use of geometric data structures as basic primitives to design the testers. We believe that the second aspect is of special importance in the context of property testing and that the use of specialized data structures as basic primitives in the testers can be applied to other important problems in this area. We shall discuss a number of models that in our opinion fit best geometric problems and apply them to study geometric properties for three very fundamental and representative problems in the area: testing convex position, testing map labeling, and testing clusterability.}, author = {Sohler, Christian and Czumaj, Artur}, journal = {Proceedings of the 9th Annual European Symposium on Algorithms (ESA`01)}, pages = {266--277}, title = {{Property Testing with Geometric Queries}}, doi = {10.1007/3-540-44676-1_22}, year = {2001}, } @inproceedings{18168, abstract = {We consider the classical LINEAR OPTIMIZATION Problem, but in the Turing rather than the RealRAM model. Asking for mere computability of a function's maximum over some closed domain, we show that the common presumptions 'full-dimensional' and `bounded' in fact cannot be omitted: The sound framework of Recursive Analysis enables us to rigorously prove this folkloristic observation! On the other hand, convexity of this domain may be weakened to connectedness, and even NON-linear functions turn out to be effectively optimizable.}, author = {Brattka, Vasco and Ziegler, Martin}, booktitle = {Proceedings of the 13th Canadian Conference on Computational Geometry (CCCG'01)}, pages = {181--184}, title = {{Turing Computability of (Non-)Linear Optimization}}, year = {2001}, } @inbook{16497, author = {Meyer auf der Heide, Friedhelm and Kutyłowski, Mirosław and Ragde, Prabhakar}, booktitle = {Euro-Par 2000 Parallel Processing}, isbn = {9783540679561}, issn = {0302-9743}, title = {{Complexity Theory and Algorithms}}, doi = {10.1007/3-540-44520-x_59}, year = {2000}, } @article{17010, author = {Czumaj, Artur and Meyer auf der Heide, Friedhelm and Stemann, Volker}, issn = {0097-5397}, journal = {SIAM Journal on Computing}, pages = {1703--1739}, title = {{Contention Resolution in Hashing Based Shared Memory Simulations}}, doi = {10.1137/s009753979529564x}, year = {2000}, } @inproceedings{17990, abstract = {We consider the notion of Property Testing as applied to computational geometry. We aim at developing efficient algorithms which determine whether a given (geometrical) object has a predetermined property Q or is 'far' from any object having the property. We show that many basic geometric properties have very efficient testing algorithms, whose running time is significantly smaller than the object description size.}, author = {Czumaj, Artur and Sohler, Christian and Ziegler, Martin}, booktitle = {Proceedings of the 8th Annual European Symposium on Algorithms (ESA'00)}, isbn = {9783540410041}, issn = {0302-9743}, pages = {155--166}, publisher = {Springer}, title = {{Property Testing in Computational Geometry}}, doi = {10.1007/3-540-45253-2_15}, volume = {4698}, year = {2000}, } @inproceedings{18962, author = {Govindarajan, Sathish and Lukovszki, Tamas and Maheshwari, Anil and Zeh, Norbert}, booktitle = {Proceedings of the 8th Annual European Symposium on Algorithms (ESA 2000), LNCS}, issn = {0178-4617}, pages = {585--614}, title = {{I/O-Efficient Well-Separated Pair Decomposition and Applications}}, doi = {10.1007/s00453-005-1197-3}, year = {2000}, } @inproceedings{18146, abstract = {Since its very beginning, linear algebra is a highly algorithmic subject. Let us just mention the famous Gauss Algorithm which was invented before the theory of algorithms has been developed. The purpose of this paper is to link linear algebra explicitly to computable analysis, that is the theory of computable real number functions. Especially, we will investigate in which sense the dimension of a given linear subspace can be computed. The answer highly depends on how the linear subspace is given: if it is given by a finite number of vectors whose linear span represents the space, then the dimension does not depend continuously on these vectors and consequently it cannot be computed. If the linear subspace is represented via its distance function, which is a standard way to represent closed subspaces in computable analysis, then the dimension does computably depend on the distance function.}, author = {Ziegler, Martin and Brattka, Vasco}, booktitle = {SOFSEM 2000: Theory and Practice of Informatics}, isbn = {9783540413486}, issn = {0302-9743}, pages = {450--458}, publisher = {Springer}, title = {{Computing the Dimension of Linear Subspaces}}, doi = {10.1007/3-540-44411-4_34}, volume = {1963}, year = {2000}, } @techreport{17865, abstract = {We present a new output-sensitive rendering algorithm, the randomized z-buffer algorithm. It renders an image of a three dimensional scene of triangular primitives by reconstruction from a random sample of surface points which are chosen with a probability proportional to the projected area of the objects. The approach is independent of mesh connectivity and topology. It leads to a rendering time that grows only logarithmically with the numbers of triangles in the scene and to linear memory consumption, thus allowing walkthroughs of scenes of extreme complexity. We consider different methods for image reconstruction which aim at correctness, rendering speed and image quality and we develop an efficient data structure for sample extraction in output-sensitive time which allows for efficient dynamic updates of the scene. Experiments confirm that scenes consisting of some hundred billion triangles can be rendered within seconds with an image quality comparable to a conventional z-buffer rendering; in special cases, realtime performance can be achieved.}, author = {Wand, Michael and Fischer, Matthias and Meyer auf der Heide, Friedhelm}, title = {{Randomized Point Sampling for Output-Sensitive Rendering of Complex Dynamic Scenes}}, year = {2000}, } @inproceedings{16495, author = {Meyer auf der Heide, Friedhelm and Räcke, H. and Westermann, M.}, booktitle = {Proceedings of the twelfth annual ACM symposium on Parallel algorithms and architectures - SPAA '00}, isbn = {1581131852}, title = {{Data management in hierarchical bus networks}}, doi = {10.1145/341800.341814}, year = {2000}, } @inproceedings{18150, abstract = {What is the minimum number of hyperplanes that slice all edges of the d-dimensional hypercube? The answers have been known for d<=4.

This work settles the problem for d=5 and d=6. More precisely, a computer search implies that 4 hyperplanes do not suffice for this purpose (but 5 do).

We also develop computational approaches for attacking this extremal problem from combinatorial geometry in higher dimensions. They allow us to determine for example all maximal sliceable subsets of hypercube edges up to dimension 7.}, author = {Ziegler, Martin and Sohler, Christian}, booktitle = {Proceedings of the 12th Canadian Conference on Computational Geometry (CCCG'00)}, pages = {73--79}, title = {{Computing Cut Numbers}}, year = {2000}, } @article{18446, author = {Lorys, Krzysztof and Wanka, Rolf and Oesterdiekhoff, Brigitte and Kutylowski, Miroslaw}, journal = {Journal of the ACM}, pages = {944--967}, title = {{Periodification Scheme: Constructing Sorting Networks with Constant Period}}, volume = {45}, year = {2000}, } @article{16345, author = {Meyer auf der Heide, Friedhelm and Wanka, Rolf}, journal = {ForschungsForum Paderborn}, pages = {112--116}, title = {{Von der Hollerith-Maschine zum Parallelrechner - Die alltägliche Aufgabe des Sortierens als Fortschrittsmotor für die Informatik}}, year = {2000}, } @inproceedings{16496, abstract = {We present a general framework for the development of on-line algorithms for data management in networks with limited memory capacities. These algorithms dynamically create and delete copies of shared data objects that can be read and written by the nodes in the network. Our algorithms aim to minimize the congestion, i.e., the maximum communication load over all network resources, so that that none of these resources become a communication bottleneck. We give several examples of networks for which our framework yields efficient algorithms, including meshes, fat-trees, hypercubic networks, and complete networks. For example, our framework yields an $O(d cdot log n)$-competitive caching algorithm for $d$-dimensional meshes of size $n$ with respect to the congestion on the communication links, and an $O(1)$-competitive algorithms for complete networks with respect to the congestion at the memory modules due to remote accesses. Previous work on data management in networks either neglects memory capacity constraints or minimizes only the total communication load, i.e., the sum of the communication load over all resources, which may produce congestion as some of the links become bottlenecks.}, author = {Meyer auf der Heide, Friedhelm and Vöcking, Berthold and Westermann, Matthias}, booktitle = {SODA '00: Proceedings of the eleventh annual ACM-SIAM symposium on Discrete algorithms}, isbn = {0898714532}, pages = {430–439}, title = {{Caching in networks}}, year = {2000}, } @inbook{17053, author = {Meyer auf der Heide, Friedhelm and Vöcking, Berthold and Westermann, Matthias}, booktitle = {Algorithms - ESA’ 99}, isbn = {9783540662518}, issn = {0302-9743}, title = {{Provably Good and Practical Strategies for Non-uniform Data Management in Networks}}, doi = {10.1007/3-540-48481-7_9}, year = {1999}, } @article{16501, author = {Meyer auf der Heide, Friedhelm and Vöcking, Berthold}, issn = {0196-6774}, journal = {Journal of Algorithms}, pages = {105--131}, title = {{Shortest-Path Routing in Arbitrary Networks}}, doi = {10.1006/jagm.1998.0980}, year = {1999}, } @inproceedings{17864, abstract = {A geometric spanner with vertex set P in Rd is a sparse approximation of the complete Euclidean graph determined by P. We introduce the notion of partitioned neighborhood graphs (PNGs), unifying and generalizing most constructions of spanners treated in literature. Two important parameters characterizing their properties are the outdegree k in N and the stretch factor f>1 describing the quality of approximation. PNGs have been throughly investigated with respect to small values of f. We present in this work results about small values of k. The aim of minimizing k rather than f arises from two observations: * k determines the amount of space required for storing PNGs. * Many algorithms employing a (previously constructed) spanner have running times depending on its outdegree. Our results include, for fixed dimensions d as well as asymptotically, upper and lower bounds on this optimal value of k. The upper bounds are shown constructively and yield efficient algorithms for actually computing the corresponding PNGs even in degenerate cases. }, author = {Fischer, Matthias and Lukovszki, Tamas and Ziegler, Martin}, booktitle = {Proceedings of the 11th Canadian Conference on Computational Geometry}, title = {{Partitioned neighborhood spanners of minimal outdegree}}, year = {1999}, } @inproceedings{18576, abstract = {In this paper we deal with two problems on star-shaped polygons. First, we present a Las-Vegas algorithm that uniformly at random creates a star-shaped polygon whose vertices are given by a point set ( S ) of ( n ) points in the plane that does not admit degenerate star-shaped polygons. The expected running time of the algorithm is ( O(n^2log n) ) and it uses ( O(n) ) memory. We call a star-shaped polygon degenerate if its kernel has 0 area.

Secondly, we show how to count all star-shaped polygons whose vertices are a subset of ( S ) in ( O(n^5log n) ) time and ( O(n) ) space. The algorithm can also be used for random uniform generation. We also present lower and upper bounds on the number of star-shaped polygons.}, author = {Sohler, Christian}, booktitle = {Proceedings of the 11th Canadian Conference on Computational Geometry ('CCCG'99)}, pages = {174--177}, title = {{Generating Random Star-Shaped Polygons}}, year = {1999}, } @inproceedings{18747, abstract = {We present a new ( O(n) ) algorithm to compute good orders for the point set of a Delaunay triangulation of ( n ) points in the plane. Such a good order makes reconstruction in ( O(n) ) time with a simple algorithm possible. In contrast to the algorithm of Snoeyink and van Kreveld cite1, which is based on independent sets, our algorithm uses a breadth first search (BFS) to obtain these orders. Both approaches construct such orders by repeatedly removing a constant fraction of vertices from the current triangulation. The advantage of the BFS approach is that we can give significantly better bounds on the fraction of removed points in a phase of the algorithm. We can prove that a single phase of our algorithm removes at least ( frac13 ) of the points, even if we restrict the degree of the points (at the time they are removed) to 6. We implemented and compared both algorithms. Our algorithms is slightly faster and achieves about 15% better vertex data compression when using a simple variable length code to encode the differences between two consecutive vertices of the given order.}, author = {Sohler, Christian}, booktitle = {Proceedings of the 11th Canadian Conference on Computational Geometry ( CCCG'99)}, pages = {136--141}, title = {{Fast Reconstruction of Delaunay Triangulations}}, year = {1999}, } @article{16502, author = {Berenbrink, P. and Meyer auf der Heide, Friedhelm and Schröder, K.}, issn = {1432-4350}, journal = {Theory of Computing Systems}, pages = {281--300}, title = {{Allocating Weighted Jobs in Parallel}}, doi = {10.1007/s002240000119}, year = {1999}, }