@inproceedings{7535,
  author       = {Böckelmann, Irina and Schenk, Daniel and Rößler, Thoralf and Adler, Simon and Senft, Björn and Grubert, Jens and Mecke, Rüdiger and Huckauf, Anke and Urbina, Mario and Tümler, Johannes and Darius, Sabine},
  title        = {{Physiologische Beanspruchungsreaktionen bei der Anwendung von kopfgetragenen AR-Displays}},
  year         = {2011},
  internal-note = {NOTE(review): @inproceedings is missing its required booktitle field -- locate and add the proceedings title},
}

@inproceedings{7536,
  author       = {Klompmaker, Florian and Senft, Björn and Nebe, Karsten and Busch, Clemens and Willemsen, Detlev},
  booktitle    = {{HEALTHINF} 2011 - Proceedings of the International Conference on Health Informatics, Rome, Italy, 26-29 January, 2011},
  pages        = {268--273},
  title        = {User Centered Design Process of {OSAMi-D} - Developing User Interfaces for a Remote Ergometer Training Application},
  year         = {2011},
}

@inproceedings{8471,
  abstract     = {Performance is an important quality attribute for business information systems. When a tester has spotted a performance error, the error is passed to the software developers to fix it. However, in component-based software development the tester has to do blame analysis first, i. e. the tester has to decide, which party is responsible to fix the error. If the error is a design or deployment issue, it can be assigned to the software architect or the system deployer. If the error is specific to a component, it needs to be assigned to the corresponding component developer. An accurate blame analysis is important, because wrong assignments of errors will cause a loss of time and money. Our approach aims at doing blame analysis for performance errors by comparing performance metrics obtained in performance testing and performance prediction. We use performance prediction values as expected values for individual components. For performance prediction we use the Palladio approach. By this means, our approach evaluates each component’s performance in a certain test case. If the component performs poorly, its component developer needs to fix the component or the architect replaces the component with a faster one. If no component performs poorly, we can deduce that there is a design or deployment issue and the architecture needs to be changed. In this paper, we present an exemplary blame analysis based on a web shop system. The example shows the feasibility of our approach.},
  author       = {Brüseke, Frank and Becker, Steffen and Engels, Gregor},
  booktitle    = {Proceedings of the 16th International Workshop on Component-Oriented Programming ({WCOP}; satellite event of the CompArch 2011), Boulder Colorado, CO (USA)},
  pages        = {25--32},
  publisher    = {ACM},
  title        = {{Palladio}-based performance blame analysis},
  doi          = {10.1145/2000292.2000298},
  year         = {2011},
}

@inproceedings{8472,
  author       = {Nagel, Benjamin},
  booktitle    = {Proceedings of the Software Engineering 2011 (SE 2011), Karlsruhe (Germany)},
  publisher    = {Gesellschaft für Informatik (GI)},
  title        = {{Semi-automatische Ableitung externer Anpassungsmechanismen für selbst-adaptive Systeme}},
  year         = {2011},
}

@inproceedings{646,
  abstract     = {This paper presents a dynamic overlay network based on the De Bruijn graph which we call Linearized De Bruijn (LDB) network. The LDB network has the advantage that it has a guaranteed constant node degree and that the routing between any two nodes takes at most O(log n) hops with high probability. Also, we show that there is a simple local-control algorithm that can recover the LDB network from any network topology that is weakly connected.},
  author       = {Richa, Andrea W. and Scheideler, Christian},
  booktitle    = {Proceedings of the 13th International Symposium on Stabilization, Safety, and Security of Distributed Systems ({SSS})},
  pages        = {416--430},
  title        = {Self-Stabilizing {DeBruijn} Networks},
  doi          = {10.1007/978-3-642-24550-3_31},
  year         = {2011},
}

@article{647,
  author       = {Leuschel, Michael and Wehrheim, Heike},
  journal      = {Science of Computer Programming},
  number       = {10},
  pages        = {835--836},
  publisher    = {Elsevier},
  title        = {Selected papers on Integrated Formal Methods ({iFM09})},
  doi          = {10.1016/j.scico.2011.01.009},
  year         = {2011},
  internal-note = {NOTE(review): issue number given without a volume -- confirm and add the journal volume},
}

@misc{648,
  author       = {Brandes, Philipp},
  publisher    = {Universität Paderborn},
  title        = {Robust Distributed Computation in Dynamic Networks},
  year         = {2011},
}

@article{649,
  abstract     = {Today, the concept of service oriented architectures provides a way of building integrated solutions out of existing services. To this end, services from different providers are composed using advanced orchestration and choreography techniques. However, while this principle allows for greater flexibility at a smaller cost, the use of third party services also includes a risk: Deployed services might not work as claimed by their providers. In this paper, we propose a technique for analyzing the expected reliability of service compositions based on ratings given by (previous) service users. Every service thereby comes with a reputation, and the analysis computes an overall reliability of a service composition from the reputations of its constituent services. The proposed model-driven approach proceeds by translating statechart models of service compositions into input for a probabilistic model checker (PRISM) using state-of-the-art model transformations techniques. The approach has been implemented as an Eclipse plug-in and is fully compliant with UML.},
  author       = {Besova, Galina and Wehrheim, Heike and Wagner, Annika},
  journal      = {Electronic Notes in Theoretical Computer Science},
  number       = {2},
  pages        = {3--16},
  publisher    = {Elsevier},
  title        = {Reputation-based Reliability Prediction of Service Compositions},
  doi          = {10.1016/j.entcs.2011.11.008},
  year         = {2011},
}

@misc{650,
  author       = {Platenius, Marie Christin},
  publisher    = {Universität Paderborn},
  title        = {Reengineering of Design Deficiencies in Component-Based Software Architectures},
  year         = {2011},
}

@inproceedings{652,
  abstract     = {In the development process of service-oriented systems, business process models are used at different levels. Typically, high-level business process models that describe business requirements and needs are stepwise refined to the IT level by different business modelers and software architects. As a result, different process model versions must be compared and merged by means of model version control. An important prerequisite for process model version control is an elaborated matching approach that results in precise mappings between different process model versions. The challenge of such an approach is to deal with syntactically different process models that are semantically equivalent. For that purpose, matching techniques must consider the semantics of process modeling languages. In this paper, we present a matching approach for process models in a versioning scenario. Based on a term formalization of process models, we enable an efficient and effective way to match syntactically different but semantically equivalent process models resulting in precise mappings.},
  author       = {Gerth, Christian and Luckey, Markus and Küster, Jochen and Engels, Gregor},
  booktitle    = {Proceedings of the IEEE 8th International Conference on Service Computing ({SCC})},
  pages        = {218--225},
  title        = {Precise Mappings between Business Process Models in Versioning Scenarios},
  doi          = {10.1109/SCC.2011.65},
  year         = {2011},
}

@inproceedings{654,
  abstract     = {Research on peer-to-peer (p2p) and distributed systems needs evaluation tools to predict and observe the behavior of protocols and mechanisms in large scale networks. PeerfactSim.KOM is a simulator for large scale distributed/p2p systems aiming at the evaluation of interdependencies in multi-layered p2p systems. The simulator is written in Java, is event-based and mainly used in p2p research projects. The main development of PeerfactSim.KOM started in 2005 and is driven since 2006 by the project “QuaP2P”,which aims at the systematic improvement and benchmarking of p2p systems. Further users of the simulator are working in the project “On-the-fly Computing” aiming at researching p2p-based service oriented architectures. Both projects state severe requirements on the evaluation of multi-layered and large-scale distributed systems. We describe the architecture of PeerfactSim.KOM supporting these requirements in Section II, present the workflow, selected experiences and lessons learned in Section III and conclude the overview in Section IV.},
  author       = {Graffi, Kalman},
  booktitle    = {Proceedings of the IEEE International Conference on Peer-to-Peer Computing (IEEE {P2P})},
  pages        = {154--155},
  title        = {{PeerfactSim.KOM}: A {P2P} System Simulator - Experiences and Lessons Learned},
  doi          = {10.1109/P2P.2011.6038673},
  year         = {2011},
}

@inproceedings{657,
  abstract     = {We present two distributed, constant factor approximation algorithms for the metric facility location problem. Both algorithms have been designed with a strong emphasis on applicability in the area of wireless sensor networks: in order to execute them, each sensor node only requires limited local knowledge and simple computations. Also, the algorithms can cope with measurement errors and take into account that communication costs between sensor nodes do not necessarily increase linearly with the distance, but can be represented by a polynomial. Since it cannot always be expected that sensor nodes execute algorithms in a synchronized way, our algorithms are executed in an asynchronous model (but they are still able to break symmetry that might occur when two neighboring nodes act at exactly the same time). Furthermore, they can deal with dynamic scenarios: if a node moves, the solution is updated and the update affects only nodes in the local neighborhood. Finally, the algorithms are robust in the sense that incorrect behavior of some nodes during some round will, in the end, still result in a good approximation. The first algorithm runs in expected $O(\log_{1+\epsilon} n)$ communication rounds and yields a $\mu^4(1+4\mu^2(1+\epsilon)^{1/p})^p$ approximation, while the second has a running time of expected $O(\log^2_{1+\epsilon} n)$ communication rounds and an approximation factor of $\mu^4(1 + 2(1 + \epsilon)^{1/p})^p$. Here, $\epsilon > 0$ is an arbitrarily small constant, $p$ the exponent of the polynomial representing the communication costs, and $\mu$ the relative measurement error.},
  author       = {Abshoff, Sebastian and Cord-Landwehr, Andreas and Degener, Bastian and Kempkes, Barbara and Pietrzyk, Peter},
  booktitle    = {Proceedings of the 7th International Symposium on Algorithms for Sensor Systems, Wireless Ad Hoc Networks and Autonomous Mobile Entities ({ALGOSENSORS})},
  pages        = {13--27},
  title        = {Local Approximation Algorithms for the Uncapacitated Metric Facility Location Problem in Power-Aware Sensor Networks},
  doi          = {10.1007/978-3-642-28209-6_3},
  year         = {2011},
}

@misc{658,
  author       = {Schremmer, Alexander},
  publisher    = {Universität Paderborn},
  title        = {Function Specification Inference Using {Craig} Interpolation},
  year         = {2011},
}

@misc{659,
  author       = {Liske, Gennadij},
  publisher    = {Universität Paderborn},
  title        = {Fault attacks in pairing-based cryptography},
  year         = {2011},
}

@misc{660,
  author       = {Peuster, Manuel},
  publisher    = {Universität Paderborn},
  title        = {Defining and Deploying Complex Appliances in Multi-Site Cloud Environments},
  year         = {2011},
}

@misc{661,
  author       = {Arifulina, Svetlana},
  publisher    = {Universität Paderborn},
  title        = {Coverage Criteria for Testing {DMM} Specifications},
  year         = {2011},
}

@inproceedings{662,
  abstract     = {We present Corona, a deterministic self-stabilizing algorithm for skip list construction in structured overlay networks. Corona operates in the low-atomicity message-passing asynchronous system model. Corona requires constant process memory space for its operation and, therefore, scales well. We prove the general necessary conditions limiting the initial states from which a self-stabilizing structured overlay network in message-passing system can be constructed. The conditions require that initial state information has to form a weakly connected graph and it should only contain identifiers that are present in the system. We formally describe Corona and rigorously prove that it stabilizes from an arbitrary initial state subject to the necessary conditions. We extend Corona to construct a skip graph.},
  author       = {Nesterenko, Mikhail and Mohd, Rizal and Scheideler, Christian},
  booktitle    = {Proceedings of the 13th International Symposium on Stabilization, Safety, and Security of Distributed Systems ({SSS})},
  pages        = {356--370},
  title        = {Corona: A Stabilizing Deterministic Message-Passing Skip List},
  doi          = {10.1007/978-3-642-24550-3_27},
  year         = {2011},
}

@misc{663,
  author       = {Swierkot, Kamil},
  publisher    = {Universität Paderborn},
  title        = {Complexity Classes for Local Computation},
  year         = {2011},
}

@inproceedings{664,
  abstract     = {Web Computing is a variant of parallel computing where the idle times of PCs donated by worldwide distributed users are employed to execute parallel programs. The PUB-Web library developed by us supports this kind of usage of computing resources. A major problem for the efficient execution of such parallel programs is load balancing. In the Web Computing context, this problem becomes more difficult because of the dynamic behavior of the underlying "parallel computer": the set of available processors (donated PCs) as well as their availability (idle times) change over time in an unpredictable fashion. In this paper, we experimentally evaluate and compare load balancing algorithms in this scenario, namely a variant of the well-established Work Stealing algorithm and strategies based on a heterogeneous version of distributed hash-tables (DHHTs) introduced recently. In order to run a meaningful experimental evaluation, we employ, in addition to our Web Computing library PUB-Web, realistic data sets for the job input streams and for the dynamics of the availability of the resources. Our experimental evaluations suggest that Work Stealing is the better strategy if the number of processes ready to run matches the number of available processors. But a suitable variant of DHHTs outperforms Work Stealing if there are significantly more processes ready to run than available processors.},
  author       = {Gehweiler, Joachim and Kling, Peter and {Meyer auf der Heide}, Friedhelm},
  booktitle    = {Proceedings of the 9th International Conference on Parallel Processing and Applied Mathematics ({PPAM})},
  pages        = {31--40},
  title        = {An Experimental Comparison of Load Balancing Strategies in a Web Computing Environment},
  doi          = {10.1007/978-3-642-31500-8_4},
  year         = {2011},
}

@misc{665,
  author       = {Wette, Philip},
  publisher    = {Universität Paderborn},
  title        = {{Adaptives Loadbalancing für strukturierte Peer-to-Peer-Netzwerke am Beispiel von Chord}},
  year         = {2011},
}

