@techreport{17865,
  abstract     = {{We present a new output-sensitive rendering algorithm, the randomized z-buffer algorithm. It renders an image of a three dimensional scene of triangular primitives by reconstruction from a random sample of surface points which are chosen with a probability proportional to the projected area of the objects. The approach is independent of mesh connectivity and topology. It leads to a rendering time that grows only logarithmically with the numbers of triangles in the scene and to linear memory consumption, thus allowing walkthroughs of scenes of extreme complexity. We consider different methods for image reconstruction which aim at correctness, rendering speed and image quality and we develop an efficient data structure for sample extraction in output-sensitive time which allows for efficient dynamic updates of the scene. Experiments confirm that scenes consisting of some hundred billion triangles can be rendered within seconds with an image quality comparable to a conventional z-buffer rendering; in special cases, realtime performance can be achieved.}},
  author       = {{Wand, Michael and Fischer, Matthias and Meyer auf der Heide, Friedhelm}},
  title        = {{{Randomized Point Sampling for Output-Sensitive Rendering of Complex Dynamic Scenes}}},
  year         = {{2000}},
}

@inproceedings{18962,
  author       = {{Govindarajan, Sathish and Lukovszki, Tamas and Maheshwari, Anil and Zeh, Norbert}},
  booktitle    = {{Proceedings of the 8th Annual European Symposium on Algorithms (ESA 2000)}},
  series       = {{Lecture Notes in Computer Science}},
  issn         = {{0178-4617}},
  pages        = {{585--614}},
  title        = {{{I/O-Efficient Well-Separated Pair Decomposition and Applications}}},
  doi          = {{10.1007/s00453-005-1197-3}},
  year         = {{2000}},
}

@inproceedings{17990,
  abstract     = {{We consider the notion of Property Testing as applied to computational geometry. We aim at developing efficient algorithms which determine whether a given (geometrical) object has a predetermined property Q or is 'far' from any object having the property. We show that many basic geometric properties have very efficient testing algorithms, whose running time is significantly smaller than the object description size.}},
  author       = {{Czumaj, Artur and Sohler, Christian and Ziegler, Martin}},
  booktitle    = {{Proceedings of the 8th Annual European Symposium on Algorithms (ESA'00)}},
  isbn         = {{9783540410041}},
  issn         = {{0302-9743}},
  pages        = {{155--166}},
  publisher    = {{Springer}},
  title        = {{{Property Testing in Computational Geometry}}},
  doi          = {{10.1007/3-540-45253-2_15}},
  volume       = {{1879}},
  year         = {{2000}},
}

@inproceedings{18146,
  abstract     = {{Since its very beginning, linear algebra is a highly algorithmic subject. Let us just mention the famous Gauss Algorithm which was invented before the theory of algorithms has been developed. The purpose of this paper is to link linear algebra explicitly to computable analysis, that is the theory of computable real number functions. Especially, we will investigate in which sense the dimension of a given linear subspace can be computed. The answer highly depends on how the linear subspace is given: if it is given by a finite number of vectors whose linear span represents the space, then the dimension does not depend continuously on these vectors and consequently it cannot be computed. If the linear subspace is represented via its distance function, which is a standard way to represent closed subspaces in computable analysis, then the dimension does computably depend on the distance function.}},
  author       = {{Ziegler, Martin and Brattka, Vasco}},
  booktitle    = {{SOFSEM 2000: Theory and Practice of Informatics}},
  isbn         = {{9783540413486}},
  issn         = {{0302-9743}},
  pages        = {{450--458}},
  publisher    = {{Springer}},
  title        = {{{Computing the Dimension of Linear Subspaces}}},
  doi          = {{10.1007/3-540-44411-4_34}},
  volume       = {{1963}},
  year         = {{2000}},
}

@inproceedings{18150,
  abstract     = {{What is the minimum number of hyperplanes that slice all edges of the d-dimensional hypercube? The answers have been known for d<=4. This work settles the problem for d=5 and d=6. More precisely, a computer search implies that 4 hyperplanes do not suffice for this purpose (but 5 do). We also develop computational approaches for attacking this extremal problem from combinatorial geometry in higher dimensions. They allow us to determine for example all maximal sliceable subsets of hypercube edges up to dimension 7.}},
  author       = {{Ziegler, Martin and Sohler, Christian}},
  booktitle    = {{Proceedings of the 12th Canadian Conference on Computational Geometry (CCCG'00)}},
  pages        = {{73--79}},
  title        = {{{Computing Cut Numbers}}},
  year         = {{2000}},
}

@article{18446,
  abstract     = {{We consider comparator networks M that are used repeatedly: while the output produced by M is not sorted, it is fed again into M. Sorting algorithms working in this way are called periodic. The number of parallel steps performed during a single run of M is called its period, the sorting time of M is the total number of parallel steps that are necessary to sort in the worst case. Periodic sorting networks have the advantage that they need little hardware (control logic, wiring, area) and that they are adaptive. We are interested in comparator networks of a constant period, due to their potential applications in hardware design.

Previously, very little was known on such networks. The fastest solutions required time O(n^epsilon) where the depth was roughly 1/epsilon. We introduce a general method called periodification scheme that converts automatically an arbitrary sorting network that sorts n items in time T(n) and that has layout area A(n) into a sorting network that has period 5, sorts Theta(n * T(n)) items in time O(T(n) * log n), and has layout area O(A(n) * T(n)). In particular, applying this scheme to Batcher's algorithms, we get practical period 5 comparator networks that sort in time O(log^3 n). For theoretical interest, one may use the AKS network resulting in a period 5 comparator network with runtime O(log^2 n).}},
  author       = {{Lorys, Krzysztof and Wanka, Rolf and Oesterdiekhoff, Brigitte and Kutylowski, Miroslaw}},
  journal      = {{Journal of the ACM}},
  pages        = {{944--967}},
  title        = {{{Periodification Scheme: Constructing Sorting Networks with Constant Period}}},
  doi          = {{10.1145/355483.355490}},
  number       = {{5}},
  volume       = {{47}},
  year         = {{2000}},
}

@inproceedings{2211,
  author       = {{Czumaj, Artur and Scheideler, Christian}},
  booktitle    = {{32nd ACM Symposium on Theory of Computing}},
  pages        = {{38--47}},
  title        = {{{A New Algorithmic Approach to the General Lovász Local Lemma with Applications to Scheduling and Satisfiability Problems}}},
  year         = {{2000}},
}

@inproceedings{16495,
  author       = {{Meyer auf der Heide, Friedhelm and Räcke, Harald and Westermann, Matthias}},
  booktitle    = {{Proceedings of the twelfth annual ACM symposium on Parallel algorithms and architectures - SPAA '00}},
  isbn         = {{1581131852}},
  title        = {{{Data management in hierarchical bus networks}}},
  doi          = {{10.1145/341800.341814}},
  year         = {{2000}},
}

@inproceedings{16496,
  abstract     = {{We present a general framework for the development of on-line algorithms for data management in networks with limited memory capacities. These algorithms dynamically create and delete copies of shared data objects that can be read and written by the nodes in the network. Our algorithms aim to minimize the congestion, i.e., the maximum communication load over all network resources, so that none of these resources become a communication bottleneck. We give several examples of networks for which our framework yields efficient algorithms, including meshes, fat-trees, hypercubic networks, and complete networks. For example, our framework yields an $O(d \cdot \log n)$-competitive caching algorithm for $d$-dimensional meshes of size $n$ with respect to the congestion on the communication links, and an $O(1)$-competitive algorithm for complete networks with respect to the congestion at the memory modules due to remote accesses. Previous work on data management in networks either neglects memory capacity constraints or minimizes only the total communication load, i.e., the sum of the communication load over all resources, which may produce congestion as some of the links become bottlenecks.}},
  author       = {{Meyer auf der Heide, Friedhelm and Vöcking, Berthold and Westermann, Matthias}},
  booktitle    = {{SODA '00: Proceedings of the eleventh annual ACM-SIAM symposium on Discrete algorithms}},
  isbn         = {{0898714532}},
  pages        = {{430--439}},
  title        = {{{Caching in networks}}},
  year         = {{2000}},
}

@inbook{16497,
  author       = {{Meyer auf der Heide, Friedhelm and Kutyłowski, Mirosław and Ragde, Prabhakar}},
  booktitle    = {{Euro-Par 2000 Parallel Processing}},
  isbn         = {{9783540679561}},
  issn         = {{0302-9743}},
  title        = {{{Complexity Theory and Algorithms}}},
  doi          = {{10.1007/3-540-44520-x_59}},
  year         = {{2000}},
}

@article{17010,
  author       = {{Czumaj, Artur and Meyer auf der Heide, Friedhelm and Stemann, Volker}},
  issn         = {{0097-5397}},
  journal      = {{SIAM Journal on Computing}},
  pages        = {{1703--1739}},
  title        = {{{Contention Resolution in Hashing Based Shared Memory Simulations}}},
  doi          = {{10.1137/s009753979529564x}},
  year         = {{2000}},
}

@article{16345,
  author       = {{Meyer auf der Heide, Friedhelm and Wanka, Rolf}},
  journal      = {{ForschungsForum Paderborn}},
  pages        = {{112--116}},
  title        = {{{Von der Hollerith-Maschine zum Parallelrechner - Die alltägliche Aufgabe des Sortierens als Fortschrittsmotor für die Informatik}}},
  year         = {{2000}},
}

@misc{19784,
  author       = {{Scheideler, Christian}},
  isbn         = {{3-931466-77-9}},
  title        = {{{Probabilistic Methods for Coordination Problems}}},
  year         = {{2000}},
}

@inproceedings{19732,
  abstract     = {{The Paderborn University BSP (PUB) library is a parallel C library based on the BSP model. The basic library supports buffered and unbuffered asynchronous communication between any pair of processors, and a mechanism for synchronizing the processors in a barrier style. In ad-dition, it provides routines for collective communication on arbitrary subsets of processors, partition operations, and a zero-cost synchronization mechanism. Furthermore, some techniques used in its implementation deviate significantly from the techniques used in other BSP libraries.}},
  author       = {{Bonorden, Olaf and Juurlink, Bernhardus and von Otte, Ingo and Rieping, Ingo}},
  booktitle    = {{Proceedings 13th International Parallel Processing Symposium and 10th Symposium on Parallel and Distributed Processing}},
  isbn         = {{0769501435}},
  pages        = {{99--104}},
  title        = {{{The Paderborn university BSP (PUB) library-design, implementation and performance}}},
  doi          = {{10.1109/ipps.1999.760442}},
  year         = {{1999}},
}

@article{2151,
  author       = {{Flammini, Michele and Scheideler, Christian}},
  journal      = {{Theory of Computing Systems}},
  number       = {{3}},
  pages        = {{387--420}},
  title        = {{{Simple, Efficient Routing Schemes for All-Optical Networks}}},
  doi          = {{10.1007/s002240000123}},
  volume       = {{32}},
  year         = {{1999}},
}

@inproceedings{2164,
  author       = {{Berenbrink, Petra and Scheideler, Christian}},
  booktitle    = {{Proceedings of the 10th Annual ACM-SIAM Symposium on Discrete Algorithms (SODA'99)}},
  pages        = {{112--121}},
  title        = {{{Locally Efficient On-Line Strategies for Routing Packets Along Fixed Paths}}},
  year         = {{1999}},
}

@inproceedings{2165,
  author       = {{Berenbrink, Petra and Riedel, Marco and Scheideler, Christian}},
  booktitle    = {{Proceedings of the 11th Annual ACM Symposium on Parallel Algorithms and Architectures (SPAA'99)}},
  pages        = {{33--42}},
  title        = {{{Simple Competitive Request Scheduling Strategies}}},
  year         = {{1999}},
}

@inproceedings{17864,
  abstract     = {{A geometric spanner with vertex set P in R^d is a sparse approximation of the complete Euclidean graph determined by P. We introduce the notion of partitioned neighborhood graphs (PNGs), unifying and generalizing most constructions of spanners treated in literature. Two important parameters characterizing their properties are the outdegree k in N and the stretch factor f>1 describing the quality of approximation. PNGs have been thoroughly investigated with respect to small values of f. We present in this work results about small values of k. The aim of minimizing k rather than f arises from two observations:

* k determines the amount of space required for storing PNGs.

* Many algorithms employing a (previously constructed) spanner have running times depending on its outdegree.

Our results include, for fixed dimensions d as well as asymptotically, upper and lower bounds on this optimal value of k. The upper bounds are shown constructively and yield efficient algorithms for actually computing the corresponding PNGs even in degenerate cases.
}},
  author       = {{Fischer, Matthias and Lukovszki, Tamas and Ziegler, Martin}},
  booktitle    = {{Proceedings of the 11th Canadian Conference on Computational Geometry}},
  title        = {{{Partitioned neighborhood spanners of minimal outdegree}}},
  year         = {{1999}},
}

@inproceedings{18747,
  abstract     = {{We present a new O(n) algorithm to compute good orders for the point set of a Delaunay triangulation of n points in the plane. Such a good order makes reconstruction in O(n) time with a simple algorithm possible. In contrast to the algorithm of Snoeyink and van Kreveld [1], which is based on independent sets, our algorithm uses a breadth first search (BFS) to obtain these orders. Both approaches construct such orders by repeatedly removing a constant fraction of vertices from the current triangulation. The advantage of the BFS approach is that we can give significantly better bounds on the fraction of removed points in a phase of the algorithm. We can prove that a single phase of our algorithm removes at least 1/3 of the points, even if we restrict the degree of the points (at the time they are removed) to 6. We implemented and compared both algorithms. Our algorithm is slightly faster and achieves about 15\% better vertex data compression when using a simple variable length code to encode the differences between two consecutive vertices of the given order.}},
  author       = {{Sohler, Christian}},
  booktitle    = {{Proceedings of the 11th Canadian Conference on Computational Geometry (CCCG'99)}},
  pages        = {{136--141}},
  title        = {{{Fast Reconstruction of Delaunay Triangulations}}},
  year         = {{1999}},
}

@phdthesis{18942,
  author       = {{Lukovszki, Tamás}},
  isbn         = {{3-931466-62-0}},
  publisher    = {{Verlagsschriftenreihe des Heinz Nixdorf Instituts, Paderborn}},
  school       = {{Universität Paderborn}},
  title        = {{{New Results on Geometric Spanners and Their Applications}}},
  volume       = {{63}},
  year         = {{1999}},
}

