@inproceedings{2392,
  author       = {Woehrle, Matthias and Plessl, Christian and Beutel, Jan and Thiele, Lothar},
  booktitle    = {Proc. Workshop on Embedded Networked Sensors (EmNets)},
  isbn         = {978-1-59593-694-3},
  keywords     = {WSN, testing, distributed, embedded},
  pages        = {93--97},
  publisher    = {ACM},
  title        = {Increasing the Reliability of Wireless Sensor Networks with a Distributed Testing Framework},
  doi          = {10.1145/1278972.1278996},
  year         = {2007},
}

@inproceedings{2393,
  author       = {Beutel, Jan and Dyer, Matthias and Lim, Roman and Plessl, Christian and Woehrle, Matthias and Yuecel, Mustafa and Thiele, Lothar},
  booktitle    = {Proc. Int. Conf. Networked Sensing Systems (INSS)},
  isbn         = {1-4244-1231-5},
  keywords     = {WSN, testing, verification},
  pages        = {303--303},
  publisher    = {IEEE},
  title        = {Automated Wireless Sensor Network Testing},
  doi          = {10.1109/INSS.2007.4297445},
  year         = {2007},
}

@techreport{2518,
  author       = {Valentin, Stefan and von Malm, Holger and Karl, Holger},
  title        = {Evaluating the {GNU} Software Radio platform for wireless testbeds},
  year         = {2006},
  internal-note = {missing required field "institution" -- TODO: confirm issuing institution},
}

@inproceedings{2523,
  author       = {Valentin, Stefan and Lichte, Herman S. and Karl, Holger and Vivier, Guillaume and Simoens, Sebastien and Vidal, Josep and Agustin, Adrian and Aad, Imad},
  title        = {Cooperative wireless networking beyond store-and-forward: Perspectives for {PHY} and {MAC} design},
  year         = {2006},
  internal-note = {missing required field "booktitle" -- TODO: add proceedings title},
}

@inproceedings{2526,
  author       = {Dannewitz, Christian and Berg, Stefan and Karl, Holger},
  booktitle    = {Proc. of the Wireless World Research Forum Meeting},
  title        = {An {IEEE} 802.21-based Universal Information Service},
  year         = {2006},
}

@phdthesis{19612,
  author       = {Klein, Jan},
  isbn         = {3-939350-05-2},
  publisher    = {Verlagsschriftenreihe des Heinz Nixdorf Instituts, Paderborn},
  title        = {Efficient Collision Detection for Point and Polygon Based Models},
  volume       = {186},
  year         = {2006},
  internal-note = {missing required field "school" -- TODO: confirm granting university},
}

@phdthesis{19613,
  author       = {R{\"u}hrup, Stefan},
  isbn         = {978-3-939350-22-4},
  publisher    = {Verlagsschriftenreihe des Heinz Nixdorf Instituts, Paderborn},
  title        = {Position-based Routing Strategies},
  volume       = {203},
  year         = {2006},
  internal-note = {missing required field "school" -- TODO: confirm granting university},
}

@inproceedings{19690,
  author       = {Briest, Patrick and Krysta, Piotr},
  booktitle    = {Proceedings of the 17th ACM-SIAM Symposium on Discrete Algorithms (SODA)},
  title        = {Single-Minded Unlimited Supply Pricing on Sparse Instances},
  year         = {2006},
}

@inproceedings{19691,
  author       = {Briest, Patrick and Gunia, Christian},
  booktitle    = {Proceedings of the 17th International Symposium on Algorithms and Computation (ISAAC)},
  title        = {Energy-Efficient Broadcast Scheduling for Speed-Controlled Transmission Channels},
  year         = {2006},
}

@misc{19718,
  author       = {Degener, Bastian},
  title        = {Die {Analyse} von {Estimation-of-Distribution-Algorithmen} auf ausgew{\"a}hlten {Funktionen}},
  year         = {2006},
}

@inproceedings{19808,
  abstract     = {We study the problem of designing an adaptive hash table for redundant data storage in a system of storage devices with arbitrary capacities. Ideally, such a hash table should make sure that (a) a storage device with x\% of the available capacity should get x\% of the data, (b) the copies of each data item are distributed among the storage devices so that no two copies are stored at the same device, and (c) only a near-minimum amount of data replacements is necessary to preserve (a) and (b) under any change in the system. Hash tables satisfying (a) and (c) are already known, and it is not difficult to construct hash tables satisfying (a) and (b). However, no hash table is known so far that can satisfy all three properties as long as this is in principle possible. We present a strategy called SPREAD that solves this problem for the first time. As long as (a) and (b) can in principle be satisfied, SPREAD preserves (a) for every storage device nearly optimal, with high probability, guarantees (b) for every data item, and only needs a constant factor more data replacements than minimum possible in order to preserve (a) and (b).},
  author       = {Mahlmann, Peter and Schindelhauer, Christian},
  booktitle    = {Proceedings of the eighteenth annual ACM symposium on Parallelism in algorithms and architectures - SPAA '06},
  isbn         = {1595934529},
  pages        = {308--317},
  title        = {Distributed random digraph transformations for peer-to-peer networks},
  doi          = {10.1145/1148109.1148162},
  year         = {2006},
}

@inproceedings{19838,
  author       = {R{\"u}hrup, Stefan and Schindelhauer, Christian},
  booktitle    = {Proc. of the 17th International Symposium on Algorithms and Computation (ISAAC 2006)},
  isbn         = {9783540496946},
  issn         = {0302-9743},
  title        = {Online Multi-path Routing in a Maze},
  doi          = {10.1007/11940128_65},
  year         = {2006},
}

@inproceedings{19839,
  abstract     = {Storage Area Networks commonly consist a more or less static set of n data servers that handle a dynamic collection of m documents. Such SANs can minimize the access time to documents by distributing each document among several servers, allowing users to access documents in parallel. This paper describes an efficient solution for providing parallel access to multiple hard disks for popular content. In extension to previous approaches we provide an efficient and elegant hash table data structure for utilizing the full capacity of each data server. Concerning the dynamics, documents as well as server may be added or removed from the system causing only local changes. We consider sequential and parallel access to data in the average case. For the average time model we present a fast optimal algorithm.},
  author       = {Schindelhauer, Christian and Schomaker, Gunnar},
  booktitle    = {5th International Conference on Networking (ICN)},
  isbn         = {0769525520},
  title        = {{SAN} Optimal Multi Parameter Access Scheme},
  doi          = {10.1109/icniconsmcl.2006.190},
  year         = {2006},
}

@techreport{19840,
  author       = {R{\"u}hrup, Stefan and Schindelhauer, Christian},
  title        = {Improved Bounds for Online Multi-Path Routing in Faulty Mesh Networks},
  year         = {2006},
  internal-note = {missing required field "institution" -- TODO: confirm issuing institution},
}

@inproceedings{19854,
  abstract     = {In previous publications there have been several proposals regarding replica generation and placement of movie content in content-distribution-networks or P2P overlays. Within this paper we extend approaches for heterogeneous placement scenarios described in prior publications. Therefor we presume heterogeneous server peers' bandwidth, HD capacity, and movie popularities. Movie documents are replicated and placed onto server peers with respect to the predicted popularity values. Thus each document aims to gain fair networks resources according to its popularity. We present simulation results of heuristics of different placement strategies and compare them with a near optimal technique.},
  author       = {Schomaker, Gunnar and Loeser, Christoph and Schubert, Matthias},
  booktitle    = {5th International Conference on Networking (ICN)},
  title        = {Predictive Replication and Placement Strategies for Movie Documents in heterogeneous Content Delivery Networks},
  year         = {2006},
}

@inproceedings{19870,
  author       = {Brinkmann, Andre and Effert, Sascha and Heidebuer, Michael and Vodisek, Mario},
  booktitle    = {5th International Conference on Networking (ICN)},
  isbn         = {0769525520},
  title        = {Realizing Multilevel Snapshots in Dynamically Changing Virtualized Storage Environments},
  doi          = {10.1109/icniconsmcl.2006.182},
  year         = {2006},
}

@inproceedings{19932,
  author       = {Kortenjan, Michael and Schomaker, Gunnar},
  booktitle    = {4th International Conference on Virtual Reality, Computer Graphics, Visualization and Interaction (Afrigraph 2006)},
  title        = {Size equivalent cluster trees ({SEC-Trees}) realtime rendering of large industrial scenes},
  doi          = {10.1145/1108590.1108608},
  year         = {2006},
  internal-note = {hniid 2484 -- internal repository id, moved out of the "abstract" field},
}

@article{2029,
  author       = {Kolman, Petr and Scheideler, Christian},
  journal      = {Journal of Algorithms},
  number       = {1},
  pages        = {20--44},
  title        = {Improved bounds for the unsplittable flow problem},
  doi          = {10.1016/j.jalgor.2004.07.006},
  volume       = {61},
  year         = {2006},
}

@phdthesis{2404,
  abstract     = {In this thesis, we propose to use a reconfigurable processor as main computation element in embedded systems for applications from the multi-media and communications domain. A reconfigurable processor integrates an embedded CPU core with a Reconfigurable Processing Unit (RPU). Many of our target applications require real-time signal-processing of data streams and expose a high computational demand. The key challenge in designing embedded systems for these applications is to find an implementation that satisfies the performance goals and is adaptable to new applications, while the system cost is minimized. Implementations that solely use an embedded CPU are likely to miss the performance goals. Application-Specific Integrated Circuit (ASIC)-based coprocessors can be used for some high-volume products with fixed functions, but fall short for systems with varying applications. We argue that a reconfigurable processor with a coarse-grained, dynamically reconfigurable array of modest size provides an attractive implementation platform for our application domain. The computational intensive application kernels are executed on the RPU, while the remaining parts of the application are executed on the CPU. Reconfigurable hardware allows for implementing application specific coprocessors with a high performance, while the function of the coprocessor can still be adapted due to the programmability. So far, reconfigurable technology is used in embedded systems primarily with static configurations, e.g., for implementing glue-logic, replacing ASICs, and for implementing fixed-function coprocessors. Changing the configuration at runtime enables a number of interesting application modes, e.g., on-demand loading of coprocessors and time-multiplexed execution of coprocessors, which is commonly denoted as hardware virtualization.
While the use of static configurations is well understood and supported by design-tools, the role of dynamic reconfiguration is not well investigated yet. Current application specification methods and design-tools do not provide an end-to-end tool-flow that considers dynamic reconfiguration. A key idea of our approach is to reduce system cost by keeping the size of the reconfigurable array small and to use hardware virtualization techniques to compensate for the limited hardware resources. The main contribution of this thesis is the codesign of a reconfigurable processor architecture named ZIPPY, the corresponding hardware and software implementation tools, and an application specification model which explicitly considers hardware virtualization. The ZIPPY architecture is widely parametrized and allows for specifying a whole family of processor architectures. The implementation tools are also parametrized and can target any architectural variant. We evaluate the performance of the architecture with a system-level, cycle-accurate cosimulation framework. This framework enables us to perform design-space exploration for a variety of reconfigurable processor architectures. With two case studies, we demonstrate, that hardware virtualization on the Zippy architecture is feasible and enables us to trade-off performance for area in embedded systems. Finally, we present a novel method for optimal temporal partitioning of sequential circuits, which is an important form of hardware virtualization. The method based on Slowdown and Retiming allows us to decompose any sequential circuit into a number of smaller, communicating subcircuits that can be executed on a dynamically reconfigurable architecture.},
  author       = {Plessl, Christian},
  isbn         = {978-3-8322-5561-3},
  keywords     = {Zippy},
  publisher    = {Shaker Verlag},
  title        = {Hardware virtualization on a coarse-grained reconfigurable processor},
  doi          = {10.2370/9783832255619},
  year         = {2006},
  internal-note = {missing required field "school" -- TODO: confirm granting university},
}

@article{2405,
  author       = {Groppe, Sven and B{\"o}ttcher, Stefan and Birkenheuer, Georg and H{\"o}ing, Andr{\'e}},
  journal      = {Data \& Knowledge Engineering},
  number       = {1},
  pages        = {64--110},
  publisher    = {Elsevier},
  title        = {Reformulating {XPath} queries and {XSLT} queries on {XSLT} views},
  doi          = {10.1016/j.datak.2005.04.002},
  volume       = {57},
  year         = {2006},
}

