@inproceedings{653,
  abstract     = {{Performance prototyping is an often used technique to assess the performance of software architectures early in the development process without relying on models of the system under study. ProtoCom is a prototype generator for the PCM realised as model-2-text transformation for which no experience report in a larger, virtualised setting exists. In this paper, we report on four case studies performed with an improved version of ProtoCom and report on the results gained with respect to analysis accuracy and usability. Our results demonstrate that the new version is much easier to use than previous versions and that results gained in our virtualised execution environment help in early assessments of performance under realistic conditions.}},
  author       = {{Lehrig, Sebastian and Zolynski, Thomas}},
  booktitle    = {{Proceedings of the Palladio Days 2011}},
  pages        = {{15--22}},
  title        = {{{Performance Prototyping with ProtoCom in a Virtualised Environment: A Case Study}}},
  doi          = {{10.5445/IR/1000025188}},
  year         = {{2011}},
}

@inproceedings{654,
  abstract     = {{Research on peer-to-peer (p2p) and distributed systems needs evaluation tools to predict and observe the behavior of protocols and mechanisms in large scale networks. PeerfactSim.KOM is a simulator for large scale distributed/p2p systems aiming at the evaluation of interdependencies in multi-layered p2p systems. The simulator is written in Java, is event-based and mainly used in p2p research projects. The main development of PeerfactSim.KOM started in 2005 and is driven since 2006 by the project “QuaP2P”, which aims at the systematic improvement and benchmarking of p2p systems. Further users of the simulator are working in the project “On-the-fly Computing” aiming at researching p2p-based service oriented architectures. Both projects state severe requirements on the evaluation of multi-layered and large-scale distributed systems. We describe the architecture of PeerfactSim.KOM supporting these requirements in Section II, present the workflow, selected experiences and lessons learned in Section III and conclude the overview in Section IV.}},
  author       = {{Graffi, Kalman}},
  booktitle    = {{Proceedings of the IEEE International Conference on Peer-to-Peer Computing (IEEE P2P)}},
  pages        = {{154--155}},
  title        = {{{PeerfactSim.KOM: A P2P System Simulator - Experiences and Lessons Learned}}},
  doi          = {{10.1109/P2P.2011.6038673}},
  year         = {{2011}},
}

@misc{655,
  author       = {{Meyer, Joachim}},
  publisher    = {{Universität Paderborn}},
  title        = {{{Modellgetriebene Skalierbarkeitsanalyse von selbst-adaptiven komponentenbasierten Softwaresystemen in der Cloud}}},
  year         = {{2011}},
}

@inproceedings{657,
  abstract     = {{We present two distributed, constant factor approximation algorithms for the metric facility location problem. Both algorithms have been designed with a strong emphasis on applicability in the area of wireless sensor networks: in order to execute them, each sensor node only requires limited local knowledge and simple computations. Also, the algorithms can cope with measurement errors and take into account that communication costs between sensor nodes do not necessarily increase linearly with the distance, but can be represented by a polynomial. Since it cannot always be expected that sensor nodes execute algorithms in a synchronized way, our algorithms are executed in an asynchronous model (but they are still able to break symmetry that might occur when two neighboring nodes act at exactly the same time). Furthermore, they can deal with dynamic scenarios: if a node moves, the solution is updated and the update affects only nodes in the local neighborhood. Finally, the algorithms are robust in the sense that incorrect behavior of some nodes during some round will, in the end, still result in a good approximation. The first algorithm runs in expected O(log_{1+\epsilon} n) communication rounds and yields a \my^4(1+4\my^2(1+\epsilon)^{1/p})^p approximation, while the second has a running time of expected O(log^2_{1+\epsilon} n) communication rounds and an approximation factor of \my^4(1 + 2(1 + \epsilon)^{1/p})^p. Here, \epsilon > 0 is an arbitrarily small constant, p the exponent of the polynomial representing the communication costs, and \my the relative measurement error.}},
  author       = {{Abshoff, Sebastian and Cord-Landwehr, Andreas and Degener, Bastian and Kempkes, Barbara and Pietrzyk, Peter}},
  booktitle    = {{Proceedings of the 7th International Symposium on Algorithms for Sensor Systems, Wireless Ad Hoc Networks and Autonomous Mobile Entities (ALGOSENSORS)}},
  pages        = {{13--27}},
  title        = {{{Local Approximation Algorithms for the Uncapacitated Metric Facility Location Problem in Power-Aware Sensor Networks}}},
  doi          = {{10.1007/978-3-642-28209-6_3}},
  year         = {{2011}},
}

NOTE(review): @misc with publisher = Universität Paderborn — presumably a student thesis; if so, prefer @mastersthesis/@phdthesis with a school field instead. Confirm against the university library record.
@misc{658,
  author       = {{Schremmer, Alexander}},
  publisher    = {{Universität Paderborn}},
  title        = {{{Function Specification Inference Using Craig Interpolation}}},
  year         = {{2011}},
}

NOTE(review): @misc with publisher = Universität Paderborn — presumably a student thesis; if so, prefer @mastersthesis/@phdthesis with a school field instead. Confirm against the university library record.
@misc{659,
  author       = {{Liske, Gennadij}},
  publisher    = {{Universität Paderborn}},
  title        = {{{Fault attacks in pairing-based cryptography}}},
  year         = {{2011}},
}

@misc{660,
  author       = {{Peuster, Manuel}},
  publisher    = {{Universität Paderborn}},
  title        = {{{Defining and Deploying Complex Appliances in Multi-Site Cloud Environments}}},
  year         = {{2011}},
}

NOTE(review): @misc with publisher = Universität Paderborn — presumably a student thesis; if so, prefer @mastersthesis/@phdthesis with a school field instead. Confirm against the university library record.
@misc{661,
  author       = {{Arifulina, Svetlana}},
  publisher    = {{Universität Paderborn}},
  title        = {{{Coverage Criteria for Testing DMM Specifications}}},
  year         = {{2011}},
}

@inproceedings{662,
  abstract     = {{We present Corona, a deterministic self-stabilizing algorithm for skip list construction in structured overlay networks. Corona operates in the low-atomicity message-passing asynchronous system model. Corona requires constant process memory space for its operation and, therefore, scales well. We prove the general necessary conditions limiting the initial states from which a self-stabilizing structured overlay network in message-passing system can be constructed. The conditions require that initial state information has to form a weakly connected graph and it should only contain identifiers that are present in the system. We formally describe Corona and rigorously prove that it stabilizes from an arbitrary initial state subject to the necessary conditions. We extend Corona to construct a skip graph.}},
  author       = {{Nesterenko, Mikhail and Mohd, Rizal and Scheideler, Christian}},
  booktitle    = {{Proceedings of the 13th International Symposium on Stabilization, Safety, and Security of Distributed Systems (SSS)}},
  pages        = {{356--370}},
  title        = {{{Corona: A Stabilizing Deterministic Message-Passing Skip List}}},
  doi          = {{10.1007/978-3-642-24550-3_27}},
  year         = {{2011}},
}

NOTE(review): @misc with publisher = Universität Paderborn — presumably a student thesis; if so, prefer @mastersthesis/@phdthesis with a school field instead. Confirm against the university library record.
@misc{663,
  author       = {{Swierkot, Kamil}},
  publisher    = {{Universität Paderborn}},
  title        = {{{Complexity Classes for Local Computation}}},
  year         = {{2011}},
}

@inproceedings{664,
  abstract     = {{Web Computing is a variant of parallel computing where the idle times of PCs donated by worldwide distributed users are employed to execute parallel programs. The PUB-Web library developed by us supports this kind of usage of computing resources. A major problem for the efficient execution of such parallel programs is load balancing. In the Web Computing context, this problem becomes more difficult because of the dynamic behavior of the underlying "parallel computer": the set of available processors (donated PCs) as well as their availability (idle times) change over time in an unpredictable fashion. In this paper, we experimentally evaluate and compare load balancing algorithms in this scenario, namely a variant of the well-established Work Stealing algorithm and strategies based on a heterogeneous version of distributed hash-tables (DHHTs) introduced recently. In order to run a meaningful experimental evaluation, we employ, in addition to our Web Computing library PUB-Web, realistic data sets for the job input streams and for the dynamics of the availability of the resources. Our experimental evaluations suggest that Work Stealing is the better strategy if the number of processes ready to run matches the number of available processors. But a suitable variant of DHHTs outperforms Work Stealing if there are significantly more processes ready to run than available processors.}},
  author       = {{Gehweiler, Joachim and Kling, Peter and Meyer auf der Heide, Friedhelm}},
  booktitle    = {{Proceedings of the 9th International Conference on Parallel Processing and Applied Mathematics (PPAM)}},
  pages        = {{31--40}},
  title        = {{{An Experimental Comparison of Load Balancing Strategies in a Web Computing Environment}}},
  doi          = {{10.1007/978-3-642-31500-8_4}},
  year         = {{2011}},
}

NOTE(review): @misc with publisher = Universität Paderborn — presumably a student thesis; if so, prefer @mastersthesis/@phdthesis with a school field instead. Confirm against the university library record.
@misc{665,
  author       = {{Wette, Philip}},
  publisher    = {{Universität Paderborn}},
  title        = {{{Adaptives Loadbalancing für strukturierte Peer-to-Peer-Netzwerke am Beispiel von Chord}}},
  year         = {{2011}},
}

@inproceedings{666,
  abstract     = {{Reconfigurable systems on chip are increasingly deployed in security and safety critical contexts. When downloading and configuring new hardware functions, we want to make sure that modules adhere to certain security specifications and do not, for example, contain hardware Trojans. As a possible approach to achieving hardware security we propose and demonstrate the concept of proof-carrying hardware, a concept inspired by previous work on proof-carrying code techniques in the software domain. In this paper, we discuss the hardware trust and threat models behind proof-carrying hardware and then present our experimental setup. We detail the employed open-source tool chain for the runtime verification of combinational equivalence and our bitstream format for an abstract FPGA architecture that allows us to experimentally validate the feasibility of our approach.}},
  author       = {{Drzevitzky, Stephanie and Platzner, Marco}},
  booktitle    = {{Proceedings of the 6th International Workshop on Reconfigurable Communication-centric Systems-on-Chip (ReCoSoC)}},
  pages        = {{58--65}},
  title        = {{{Achieving Hardware Security for Reconfigurable Systems on Chip by a Proof-Carrying Code Approach}}},
  doi          = {{10.1109/ReCoSoC.2011.5981499}},
  year         = {{2011}},
}

@proceedings{667,
  editor       = {{Meyer auf der Heide, Friedhelm and Rajaraman, Rajmohan}},
  title        = {{{23rd Annual ACM Symposium on Parallelism in Algorithms and Architectures}}},
  doi          = {{10.1145/1989493}},
  year         = {{2011}},
}

@techreport{668,
  abstract     = {{The objective of this study is the analysis of movie success mechanisms in a genre-specific context. Instead of the examination of all time box office champions, we focus on the two film genres of computer animated and comic book based films. By introducing the concept of the motion-picture marketing mix, which represents a set of tactical marketing tools in order to strengthen a company’s strategic customer orientation, we are able to systematically identify key movie success factors. We conduct a cross-sectional empirical analysis across regional distinctions based on dataset that covers a time horizon of more than 30 years. We find empirical evidence that actors with ex ante popularity, award nominations and the production budget represent key movie success mechanisms and significantly influence a movie’s commercial appeal. Additionally, word-of-mouth creates reputation effects that also significantly affects box office gross.}},
  author       = {{Kaimann, Daniel}},
  institution  = {{Universität Paderborn}},
  title        = {{{"To infinity and beyond!" - A genre-specific film analysis of movie success mechanisms}}},
  year         = {{2011}},
}

NOTE(review): Springer proceedings with volume = 7051 but no series field — presumably Lecture Notes in Computer Science (BNCOD 28 revised selected papers); confirm and add series if so. Also no doi/pages issues otherwise.
@inproceedings{6740,
  author       = {{Böttcher, Stefan and Bokermann, Dennis and Hartel, Rita}},
  booktitle    = {{Advances in Databases - 28th British National Conference on Databases, BNCOD 28, Revised Selected Papers}},
  pages        = {{209--220}},
  publisher    = {{Springer Berlin/Heidelberg}},
  title        = {{{Computing Compressed XML Data from Relational Databases}}},
  volume       = {{7051}},
  year         = {{2011}},
}

NOTE(review): edited volume with editor but no author — correct use of @book. Consider adding address/location for publisher WVT and an ISBN if available — confirm against the publisher record.
@book{6755,
  editor       = {{Tönnies, Merle and Flotmann, Christina}},
  publisher    = {{WVT}},
  title        = {{{Narrative in Drama}}},
  year         = {{2011}},
}

@inproceedings{6833,
  author       = {{Bendfeld, Jörg and Krauter, Stefan}},
  booktitle    = {{Wissenschaftliche Verhandlungen 2011 der Deutschen Physikalischen Gesellschaft, Dresden, März 2011}},
  title        = {{{Meteorological measurements in the Baltic and the North Sea}}},
  year         = {{2011}},
}

@inproceedings{6834,
  author       = {{Bendfeld, Jörg and Krauter, Stefan}},
  booktitle    = {{Wissenschaftliche Verhandlungen 2011 der Deutschen Physikalischen Gesellschaft, Dresden, März 2011}},
  title        = {{{Hybrid measuring devices for evaluation of waves and currents}}},
  year         = {{2011}},
}

NOTE(review): no pages/doi recorded for this workshop paper — confirm whether any exist; booktitle embeds venue and date per this file's convention for DPG/DPS entries.
@inproceedings{6835,
  author       = {{Bendfeld, Jörg}},
  booktitle    = {{Proceedings of the Decentralized Power Systems (DPS) 2011, Paderborn, Germany, 2. September 2011}},
  title        = {{{Increased availability of offshore wind power by distributed biogas plants}}},
  year         = {{2011}},
}

