@article{6474,
  author        = {Büchter, K. and Gramlinger, F. and Kremer, H.-Hugo and Tenberg, R. and Tramm, T.},
  title         = {{bwp@ Berufs- und Wirtschaftspädagogik - online 2011-2011: Verortung, Selbstverständnis und Entwicklung}},
  journal       = {bwp@ Berufs- und Wirtschaftspädagogik - online, Jubiläumsausgabe},
  pages         = {1--11},
  year          = {2011},
  internal-note = {NOTE(review): the range "2011-2011" in the title looks like a typo for "2001-2011" (10-year jubilee issue) -- confirm against the journal before changing},
}

@misc{648,
  author    = {Brandes, Philipp},
  title     = {Robust Distributed Computation in Dynamic Networks},
  publisher = {Universität Paderborn},
  year      = {2011},
}

@article{649,
  author    = {Besova, Galina and Wehrheim, Heike and Wagner, Annika},
  title     = {Reputation-based Reliability Prediction of Service Compositions},
  journal   = {Electronic Notes in Theoretical Computer Science},
  number    = {2},
  pages     = {3--16},
  publisher = {Elsevier},
  doi       = {10.1016/j.entcs.2011.11.008},
  year      = {2011},
  abstract  = {Today, the concept of service oriented architectures provides a way of building integrated solutions out of existing services. To this end, services from different providers are composed using advanced orchestration and choreography techniques. However, while this principle allows for greater flexibility at a smaller cost, the use of third party services also includes a risk: Deployed services might not work as claimed by their providers. In this paper, we propose a technique for analyzing the expected reliability of service compositions based on ratings given by (previous) service users. Every service thereby comes with a reputation, and the analysis computes an overall reliability of a service composition from the reputations of its constituent services. The proposed model-driven approach proceeds by translating statechart models of service compositions into input for a probabilistic model checker (PRISM) using state-of-the-art model transformations techniques. The approach has been implemented as an Eclipse plug-in and is fully compliant with UML.},
}

@misc{650,
  author    = {Platenius, Marie Christin},
  title     = {Reengineering of Design Deficiencies in Component-Based Software Architectures},
  publisher = {Universität Paderborn},
  year      = {2011},
}

@misc{651,
  author    = {Stroh-Maraun, Nadja},
  title     = {{Prozedurale Ansätze zur Lösung mehrdimensionaler Verhandlungsprobleme}},
  publisher = {Universität Paderborn},
  year      = {2011},
}

@inproceedings{652,
  author    = {Gerth, Christian and Luckey, Markus and Küster, Jochen and Engels, Gregor},
  title     = {Precise Mappings between Business Process Models in Versioning Scenarios},
  booktitle = {Proceedings of the IEEE 8th International Conference on Service Computing (SCC)},
  pages     = {218--225},
  doi       = {10.1109/SCC.2011.65},
  year      = {2011},
  abstract  = {In the development process of service-oriented systems, business process models are used at different levels. Typically, high-level business process models that describe business requirements and needs are stepwise refined to the IT level by different business modelers and software architects. As a result, different process model versions must be compared and merged by means of model version control. An important prerequisite for process model version control is an elaborated matching approach that results in precise mappings between different process model versions. The challenge of such an approach is to deal with syntactically different process models that are semantically equivalent. For that purpose, matching techniques must consider the semantics of process modeling languages. In this paper, we present a matching approach for process models in a versioning scenario. Based on a term formalization of process models, we enable an efficient and effective way to match syntactically different but semantically equivalent process models resulting in precise mappings.},
}

@inproceedings{653,
  author    = {Lehrig, Sebastian and Zolynski, Thomas},
  title     = {Performance Prototyping with {ProtoCom} in a Virtualised Environment: A Case Study},
  booktitle = {Proceedings of the Palladio Days 2011},
  pages     = {15--22},
  doi       = {10.5445/IR/1000025188},
  year      = {2011},
  abstract  = {Performance prototyping is an often used technique to assess the performance of software architectures early in the development process without relying on models of the system under study. ProtoCom is a prototype generator for the PCM realised as model-2-text transformation for which no experience report in a larger, virtualised setting exists. In this paper, we report on four case studies performed with an improved version of ProtoCom and report on the results gained with respect to analysis accuracy and usability. Our results demonstrate that the new version is much easier to use than previous versions and that results gained in our virtualised execution environment help in early assessments of performance under realistic conditions.},
}

@inproceedings{654,
  author    = {Graffi, Kalman},
  title     = {{PeerfactSim.KOM}: A {P2P} System Simulator - Experiences and Lessons Learned},
  booktitle = {Proceedings of the IEEE International Conference on Peer-to-Peer Computing (IEEE P2P)},
  pages     = {154--155},
  doi       = {10.1109/P2P.2011.6038673},
  year      = {2011},
  abstract  = {Research on peer-to-peer (p2p) and distributed systems needs evaluation tools to predict and observe the behavior of protocols and mechanisms in large scale networks. PeerfactSim.KOM is a simulator for large scale distributed/p2p systems aiming at the evaluation of interdependencies in multi-layered p2p systems. The simulator is written in Java, is event-based and mainly used in p2p research projects. The main development of PeerfactSim.KOM started in 2005 and is driven since 2006 by the project “QuaP2P”, which aims at the systematic improvement and benchmarking of p2p systems. Further users of the simulator are working in the project “On-the-fly Computing” aiming at researching p2p-based service oriented architectures. Both projects state severe requirements on the evaluation of multi-layered and large-scale distributed systems. We describe the architecture of PeerfactSim.KOM supporting these requirements in Section II, present the workflow, selected experiences and lessons learned in Section III and conclude the overview in Section IV.},
}

@misc{655,
  author    = {Meyer, Joachim},
  title     = {{Modellgetriebene Skalierbarkeitsanalyse von selbst-adaptiven Komponentenbasierten Softwaresystemen in der Cloud}},
  publisher = {Universität Paderborn},
  year      = {2011},
}

@inproceedings{657,
  author    = {Abshoff, Sebastian and Cord-Landwehr, Andreas and Degener, Bastian and Kempkes, Barbara and Pietrzyk, Peter},
  title     = {Local Approximation Algorithms for the Uncapacitated Metric Facility Location Problem in Power-Aware Sensor Networks},
  booktitle = {Proceedings of the 7th International Symposium on Algorithms for Sensor Systems, Wireless Ad Hoc Networks and Autonomous Mobile Entities (ALGOSENSORS)},
  pages     = {13--27},
  doi       = {10.1007/978-3-642-28209-6_3},
  year      = {2011},
  abstract  = {We present two distributed, constant factor approximation algorithms for the metric facility location problem. Both algorithms have been designed with a strong emphasis on applicability in the area of wireless sensor networks: in order to execute them, each sensor node only requires limited local knowledge and simple computations. Also, the algorithms can cope with measurement errors and take into account that communication costs between sensor nodes do not necessarily increase linearly with the distance, but can be represented by a polynomial. Since it cannot always be expected that sensor nodes execute algorithms in a synchronized way, our algorithms are executed in an asynchronous model (but they are still able to break symmetry that might occur when two neighboring nodes act at exactly the same time). Furthermore, they can deal with dynamic scenarios: if a node moves, the solution is updated and the update affects only nodes in the local neighborhood. Finally, the algorithms are robust in the sense that incorrect behavior of some nodes during some round will, in the end, still result in a good approximation. The first algorithm runs in expected O(log_{1+\epsilon} n) communication rounds and yields a \my^4(1+4\my^2(1+\epsilon)^{1/p})^p approximation, while the second has a running time of expected O(log^2_{1+\epsilon} n) communication rounds and an approximation factor of \my^4(1 + 2(1 + \epsilon)^{1/p})^p. Here, \epsilon > 0 is an arbitrarily small constant, p the exponent of the polynomial representing the communication costs, and \my the relative measurement error.},
}

@misc{658,
  author    = {Schremmer, Alexander},
  title     = {Function Specification Inference Using {Craig} Interpolation},
  publisher = {Universität Paderborn},
  year      = {2011},
}

@misc{659,
  author    = {Liske, Gennadij},
  title     = {Fault attacks in pairing-based cryptography},
  publisher = {Universität Paderborn},
  year      = {2011},
}

@misc{660,
  author    = {Peuster, Manuel},
  title     = {Defining and Deploying Complex Appliances in Multi-Site Cloud Environments},
  publisher = {Universität Paderborn},
  year      = {2011},
}

@misc{661,
  author    = {Arifulina, Svetlana},
  title     = {Coverage Criteria for Testing {DMM} Specifications},
  publisher = {Universität Paderborn},
  year      = {2011},
}

@inproceedings{662,
  author    = {Nesterenko, Mikhail and Mohd, Rizal and Scheideler, Christian},
  title     = {{Corona}: A Stabilizing Deterministic Message-Passing Skip List},
  booktitle = {Proceedings of the 13th International Symposium on Stabilization, Safety, and Security of Distributed Systems (SSS)},
  pages     = {356--370},
  doi       = {10.1007/978-3-642-24550-3_27},
  year      = {2011},
  abstract  = {We present Corona, a deterministic self-stabilizing algorithm for skip list construction in structured overlay networks. Corona operates in the low-atomicity message-passing asynchronous system model. Corona requires constant process memory space for its operation and, therefore, scales well. We prove the general necessary conditions limiting the initial states from which a self-stabilizing structured overlay network in message-passing system can be constructed. The conditions require that initial state information has to form a weakly connected graph and it should only contain identiers that are present in the system. We formally describe Corona and rigorously prove that it stabilizes from an arbitrary initial state subject to the necessary conditions. We extend Corona to construct a skip graph.},
}

@misc{663,
  author    = {Swierkot, Kamil},
  title     = {Complexity Classes for Local Computation},
  publisher = {Universität Paderborn},
  year      = {2011},
}

@inproceedings{664,
  author    = {Gehweiler, Joachim and Kling, Peter and {Meyer auf der Heide}, Friedhelm},
  title     = {An Experimental Comparison of Load Balancing Strategies in a {Web} Computing Environment},
  booktitle = {Proceedings of the 9th International Conference on Parallel Processing and Applied Mathematics (PPAM)},
  pages     = {31--40},
  doi       = {10.1007/978-3-642-31500-8_4},
  year      = {2011},
  abstract  = {Web Computing is a variant of parallel computing where the idle times of PCs donated by worldwide distributed users are employed to execute parallel programs. The PUB-Web library developed by us supports this kind of usage of computing resources. A major problem for the efficient execution of such parallel programs is load balancing. In the Web Computing context, this problem becomes more difficult because of the dynamic behavior of the underlying "parallel computer": the set of available processors (donated PCs) as well as their availability (idle times) change over time in an unpredictable fashion. In this paper, we experimentally evaluate and compare load balancing algorithms in this scenario, namely a variant of the well-established Work Stealing algorithm and strategies based on a heterogeneous version of distributed hash-tables (DHHTs) introduced recently. In order to run a meaningful experimental evaluation, we employ, in addition to our Web Computing library PUB-Web, realistic data sets for the job input streams and for the dynamics of the availability of the resources. Our experimental evaluations suggest that Work Stealing is the better strategy if the number of processes ready to run matches the number of available processors. But a suitable variant of DHHTs outperforms Work Stealing if there are significantly more processes ready to run than available processors.},
}

@misc{665,
  author    = {Wette, Philip},
  title     = {{Adaptives Loadbalancing für strukturierte Peer-to-Peer-Netzwerke am Beispiel von Chord}},
  publisher = {Universität Paderborn},
  year      = {2011},
}

@inproceedings{666,
  author    = {Drzevitzky, Stephanie and Platzner, Marco},
  title     = {Achieving Hardware Security for Reconfigurable Systems on Chip by a Proof-Carrying Code Approach},
  booktitle = {Proceedings of the 6th International Workshop on Reconfigurable Communication-centric Systems-on-Chip (ReCoSoC)},
  pages     = {58--65},
  doi       = {10.1109/ReCoSoC.2011.5981499},
  year      = {2011},
  abstract  = {Reconfigurable systems on chip are increasingly deployed in security and safety critical contexts. When downloading and configuring new hardware functions, we want to make sure that modules adhere to certain security specifications and do not, for example, contain hardware Trojans. As a possible approach to achieving hardware security we propose and demonstrate the concept of proof-carrying hardware, a concept inspired by previous work on proof-carrying code techniques in the software domain. In this paper, we discuss the hardware trust and threat models behind proof-carrying hardware and then present our experimental setup. We detail the employed open-source tool chain for the runtime verification of combinational equivalence and our bitstream format for an abstract FPGA architecture that allows us to experimentally validate the feasibility of our approach.},
}

@proceedings{667,
  editor = {{Meyer auf der Heide}, Friedhelm and Rajaraman, Rajmohan},
  title  = {23rd Annual {ACM} Symposium on Parallelism in Algorithms and Architectures},
  doi    = {10.1145/1989493},
  year   = {2011},
}