@misc{522,
  author       = {Feldotto, Matthias},
  publisher    = {Universität Paderborn},
  title        = {{HSkip+}: A Self-Stabilizing Overlay Network for Nodes with Heterogeneous Bandwidths},
  year         = {2013},
}

@unpublished{524,
  abstract     = {We study the complexity theory for the local distributed setting introduced by Korman, Peleg and Fraigniaud. They have defined three complexity classes LD (Local Decision), NLD (Nondeterministic Local Decision) and NLD$^{\#n}$. The class LD consists of all languages which can be decided with a constant number of communication rounds. The class NLD consists of all languages which can be verified by a nondeterministic algorithm with a constant number of communication rounds. In order to define the nondeterministic classes, they have transferred the notation of nondeterminism into the distributed setting by the use of certificates and verifiers. The class NLD$^{\#n}$ consists of all languages which can be verified by a nondeterministic algorithm where each node has access to an oracle for the number of nodes. They have shown the hierarchy LD subset NLD subset NLD$^{\#n}$. Our main contributions are strict hierarchies within the classes defined by Korman, Peleg and Fraigniaud. We define additional complexity classes: the class LD(t) consists of all languages which can be decided with at most t communication rounds. The class NLD-O(f) consists of all languages which can be verified by a local verifier such that the size of the certificates that are needed to verify the language are bounded by a function from O(f). Our main results are refined strict hierarchies within these nondeterministic classes.},
  author       = {{Meyer auf der Heide}, Friedhelm and Swirkot, Kamil},
  publisher    = {arXiv},
  note         = {arXiv preprint},
  title        = {Hierarchies in Local Distributed Decision},
  year         = {2013},
}

@misc{525,
  author       = {Vinkemeier, Tim Niklas},
  publisher    = {Universität Paderborn},
  title        = {Haptics - {Hadoop} performance testing in concurrent job scenarios},
  year         = {2013},
}

@inproceedings{527,
  abstract     = {In the future vision of software engineering, services from world-wide markets are composed automated in order to build custom-made systems. Supporting such scenarios requires an adequate service matching approach. Many existing approaches do not fulfill two key requirements of emerging concepts like On-The-Fly-Computing, namely (1) comprehensiveness, i.e., the consideration of different service aspects that cover not only functional properties, but also non-functional properties and (2) fuzzy matching, i.e., the ability to deliver gradual results in order to cope with a certain extent of uncertainty, incompleteness, and tolerance ranges. In this paper, I present a fuzzy matching process that distinguishes between different fuzziness sources and leverages fuzziness in different matching steps which consider different service aspects, e.g., behavior and quality properties.},
  author       = {Platenius, Marie Christin},
  booktitle    = {Proceedings of the Doctoral Symposium of the 9th joint meeting of the European Software Engineering Conference (ESEC) and the ACM SIGSOFT Symposium on the Foundations of Software Engineering (FSE)},
  pages        = {715--718},
  title        = {Fuzzy Service Matching in {On-The-Fly} Computing},
  doi          = {10.1145/2491411.2492405},
  year         = {2013},
}

@misc{534,
  author       = {Satya, Suhas},
  publisher    = {Universität Paderborn},
  title        = {Emulating Wavelength Division Multiplexing using {Openflow}},
  year         = {2013},
}

@unpublished{538,
  abstract     = {We present a new technique to realize attribute-based encryption (ABE) schemes secure in the standard model against chosen-ciphertext attacks (CCA-secure). Our approach is to extend certain concrete chosen-plaintext secure (CPA-secure) ABE schemes to achieve more efficient constructions than the known generic constructions of CCA-secure ABE schemes. We restrict ourselves to the construction of attribute-based key encapsulation mechanisms (KEMs) and present two concrete CCA-secure schemes: a key-policy attribute-based KEM that is based on Goyal's key-policy ABE and a ciphertext-policy attribute-based KEM that is based on Waters' ciphertext-policy ABE. To achieve our goals, we use an appropriate hash function and need to extend the public parameters and the ciphertexts of the underlying CPA-secure encryption schemes only by a single group element. Moreover, we use the same hardness assumptions as the underlying CPA-secure encryption schemes.},
  author       = {Blömer, Johannes and Liske, Gennadij},
  publisher    = {Universität Paderborn},
  note         = {Preprint},
  title        = {Direct Chosen-Ciphertext Secure Attribute-Based Key Encapsulations without Random Oracles},
  year         = {2013},
}

@inproceedings{541,
  abstract     = {Existing solutions for gossip-based aggregation in peer-to-peer networks use epochs to calculate a global estimation from an initial static set of local values. Once the estimation converges system-wide, a new epoch is started with fresh initial values. Long epochs result in precise estimations based on old measurements and short epochs result in imprecise aggregated estimations. In contrast to this approach, we present in this paper a continuous, epoch-less approach which considers fresh local values in every round of the gossip-based aggregation. By using an approach for dynamic information aging, inaccurate values and values from left peers fade from the aggregation memory. Evaluation shows that the presented approach for continuous information aggregation in peer-to-peer systems monitors the system performance precisely, adapts to changes and is lightweight to operate.},
  author       = {Graffi, Kalman and Rapp, Vitaly},
  booktitle    = {Proceedings of the International Conference on Computer Communications and Networks (ICCCN'13)},
  pages        = {1--7},
  title        = {Continuous Gossip-based Aggregation through Dynamic Information Aging},
  doi          = {10.1109/ICCCN.2013.6614118},
  year         = {2013},
}

@inproceedings{542,
  abstract     = {We consider the problem of managing a dynamic heterogeneous storage system in a distributed way so that the amount of data assigned to a host in that system is related to its capacity. Two central problems have to be solved for this: (1) organizing the hosts in an overlay network with low degree and diameter so that one can efficiently check the correct distribution of the data and route between any two hosts, and (2) distributing the data among the hosts so that the distribution respects the capacities of the hosts and can easily be adapted as the set of hosts or their capacities change. We present distributed protocols for these problems that are self-stabilizing and that do not need any global knowledge about the system such as the number of nodes or the overall capacity of the system. Prior to this work no solution was known satisfying these properties.},
  author       = {Kniesburges, Sebastian and Koutsopoulos, Andreas and Scheideler, Christian},
  booktitle    = {Proceedings of the 27th International Symposium on Distributed Computing (DISC)},
  pages        = {537--549},
  title        = {{CONE-DHT}: A distributed self-stabilizing algorithm for a heterogeneous storage system},
  doi          = {10.1007/978-3-642-41527-2_37},
  year         = {2013},
}

@inproceedings{544,
  abstract     = {Comparative evaluations of peer-to-peer protocols through simulations are a viable approach to judge the performance and costs of the individual protocols in large-scale networks. In order to support this work, we enhanced the peer-to-peer systems simulator PeerfactSim.KOM with a fine-grained analyzer concept, with exhaustive automated measurements and gnuplot generators as well as a coordination control to evaluate a set of experiment setups in parallel. Thus, by configuring all experiments and protocols only once and starting the simulator, all desired measurements are performed, analyzed, evaluated and combined, resulting in a holistic environment for the comparative evaluation of peer-to-peer systems.},
  author       = {Feldotto, Matthias and Graffi, Kalman},
  booktitle    = {Proceedings of the International Conference on High Performance Computing and Simulation (HPCS'13)},
  pages        = {99--106},
  title        = {Comparative Evaluation of Peer-to-Peer Systems Using {PeerfactSim.KOM}},
  doi          = {10.1109/HPCSim.2013.6641399},
  year         = {2013},
}

@inproceedings{546,
  abstract     = {Self-stabilization is the property of a system to transfer itself regardless of the initial state into a legitimate state. Chord as a simple, decentralized and scalable distributed hash table is an ideal showcase to introduce self-stabilization for p2p overlays. In this paper, we present Re-Chord, a self-stabilizing version of Chord. We show, that the stabilization process is functional, but prone to strong churn. For that, we present Ca-Re-Chord, a churn resistant version of Re-Chord, that allows the creation of a useful DHT in any kind of graph regardless of the initial state. Simulation results attest the churn resistance and good performance of Ca-Re-Chord.},
  author       = {Graffi, Kalman and Benter, Markus and Divband, Mohammad and Kniesburges, Sebastian and Koutsopoulos, Andreas},
  booktitle    = {Proceedings of the Conference on Networked Systems (NetSys)},
  pages        = {27--34},
  title        = {{Ca-Re-Chord}: A Churn Resistant Self-stabilizing {Chord} Overlay Network},
  doi          = {10.1109/NetSys.2013.11},
  year         = {2013},
}

@phdthesis{547,
  abstract     = {In recent years, the role of process models in the development of enterprise software systems has increased continuously. Today, process models are used at different levels in the development process. For instance, in Service-Oriented Architectures (SOA), high-level business process models become input for the development of IT systems, and in running IT systems executable process models describe choreographies of Web Services. A key driver behind this development is the necessity for a closer alignment of business and IT requirements, to reduce the reaction times in software development to frequent changes in competitive markets. Typically in these scenarios, process models are developed, maintained, and transformed in a team environment by several stakeholders that are often from different business units, resulting in different versions. To obtain integrated process models comprising the changes applied to different versions, the versions need to be consolidated by means of model change management. Change management for process models can be compared to widely used concurrent versioning systems (CVS) and consists of the following major activities: matching of process models, detection of differences, computation of dependencies and conflicts between differences, and merging of process models. Although in general model-driven development (MDD) is accepted as a well-established development approach, there are still some shortcomings that let developers decide against MDD and for more traditional development paradigms. These shortcomings comprise a lack of fully integrated and fully featured development environments for MDD, such as a comprehensive support for model change management. In this thesis, we present a framework for process model change management. The framework is based on an intermediate representation for process models that serves as an abstraction of specific process modeling languages and focuses on common syntactic and semantic core concepts for the modeling of workflow in process models. Based on the intermediate representation, we match process models in versioning scenarios and compute differences between process models generically. Further, we consider the analysis of dependencies between differences and show how conflicts between differences can be computed by taking into account the semantics of the modeling language. As proof-of-concept, we have implemented major parts of this framework in terms of a prototype. The detection of differences and dependencies contributed also to the Compare \& Merge framework for the IBM WebSphere Business Modeler V 7.0 [1] (WBM), which was released as a product in fall 2009.},
  author       = {Gerth, Christian},
  school       = {Universität Paderborn},
  title        = {Business Process Models - Change Management},
  doi          = {10.1007/978-3-642-38604-6},
  year         = {2013},
}

@inproceedings{548,
  abstract     = {Peer-to-peer systems scale to millions of nodes and provide routing and storage functions with best effort quality. In order to provide a guaranteed quality of the overlay functions, even under strong dynamics in the network with regard to peer capacities, online participation and usage patterns, we propose to calibrate the peer-to-peer overlay and to autonomously learn which qualities can be reached. For that, we simulate the peer-to-peer overlay systematically under a wide range of parameter configurations and use neural networks to learn the effects of the configurations on the quality metrics. Thus, by choosing a specific quality setting by the overlay operator, the network can tune itself to the learned parameter configurations that lead to the desired quality. Evaluation shows that the presented self-calibration succeeds in learning the configuration-quality interdependencies and that peer-to-peer systems can learn and adapt their behavior according to desired quality goals.},
  author       = {Graffi, Kalman and Klerx, Timo},
  booktitle    = {Proceedings of the International Conference on Peer-to-Peer Computing (P2P'13)},
  pages        = {1--5},
  title        = {Bootstrapping {Skynet}: Calibration and Autonomic Self-Control of Structured Peer-to-Peer Networks},
  doi          = {10.1109/P2P.2013.6688720},
  year         = {2013},
}

@inproceedings{551,
  abstract     = {In the service-oriented computing domain, the number of available software services steadily increased in recent years, favored by the rise of cloud computing with its attached delivery models like Software-as-a-Service (SaaS). To fully leverage the opportunities provided by these services for developing highly flexible and aligned SOA, integration of new services as well as the substitution of existing services must be simplified. As a consequence, approaches for automated and accurate service discovery and composition are needed. In this paper, we propose an automatic service composition approach as an extension to our earlier work on automatic service discovery. To ensure accurate results, it matches service requests and available offers based on their structural as well as behavioral aspects. Afterwards, possible service compositions are determined by composing service protocols through a composition strategy based on labeled transition systems.},
  author       = {Huma, Zille and Gerth, Christian and Engels, Gregor and Juwig, Oliver},
  booktitle    = {Proceedings of the 11th International Conference on Service Oriented Computing (ICSOC'13)},
  pages        = {524--532},
  title        = {Automated Service Composition for {On-the-Fly} {SOAs}},
  doi          = {10.1007/978-3-642-45005-1_42},
  year         = {2013},
}

@misc{553,
  author       = {Kohn, Kathlén},
  publisher    = {Universität Paderborn},
  title        = {{Attributbasierte Verschlüsselung mittels Gittermethoden - Mathematische Grundlagen, Verfahren und Sicherheitsbeweise}},
  year         = {2013},
}

@misc{555,
  author       = {Setzer, Alexander},
  publisher    = {Universität Paderborn},
  title        = {Approximation Algorithms for the Linear Arrangement of Special Classes of Graphs},
  year         = {2013},
}

@proceedings{558,
  editor       = {Flocchini, Paola and Gao, Jie and Kranakis, Evangelos and {Meyer auf der Heide}, Friedhelm},
  location     = {Sophia Antipolis, France},
  publisher    = {Springer},
  title        = {Algorithms for Sensor Systems - 9th International Symposium on Algorithms and Experiments for Sensor Systems, Wireless Networks and Distributed Robotics},
  doi          = {10.1007/978-3-642-45346-5},
  volume       = {8243},
  year         = {2013},
}

@inproceedings{559,
  abstract     = {Distributed hash tables are very versatile to use, as distributed storage is a desirable feature for various applications. Typical structured overlays like Chord, Pastry or Kademlia consider only homogeneous nodes with equal capacities, which does not resemble reality. In a practical use case, nodes might get overloaded by storing popular data. In this paper, we present a general approach to enable capacity awareness and load-balancing capability of homogeneous structured overlays. We introduce a hierarchical second structured overlay aside, which allows efficient capacity-based access on the nodes in the system as hosting mirrors. Simulation results show that the structured overlay is able to store various contents, such as of a social network, with only a negligible number of overloaded peers. Content, even if very popular, is hosted by easily findable capable peers. Thus, long-existing and well-evaluated overlays like Chord or Pastry can be used to create attractive DHT-based applications.},
  author       = {Wette, Philip and Graffi, Kalman},
  booktitle    = {Proceedings of the Conference on Networked Systems (NetSys)},
  pages        = {35--42},
  title        = {Adding Capacity-Aware Storage Indirection to Homogeneous Distributed Hash Tables},
  doi          = {10.1109/NetSys.2013.9},
  year         = {2013},
}

@inproceedings{560,
  abstract     = {In the last decades, development turned from monolithic software products towards more flexible software components that can be provided on world-wide markets in form of services. Customers request such services or compositions of several services. However, in many cases, discovering the best services to address a given request is a tough challenge and requires expressive, gradual matching results, considering different aspects of a service description, e.g., inputs/outputs, protocols, or quality properties. Furthermore, in situations in which no service exactly satisfies the request, approximate matching which can deal with a certain amount of fuzziness becomes necessary. There is a wealth of service matching approaches, but it is not clear whether there is a comprehensive, fuzzy matching approach which addresses all these challenges. Although there are a few service matching surveys, none of them is able to answer this question. In this paper, we perform a systematic literature survey of 35 (out of 504) service matching approaches which consider fuzzy matching. Based on this survey, we propose a classification, discuss how different matching approaches can be combined into a comprehensive matching method, and identify future research challenges.},
  author       = {Platenius, Marie and von Detten, Markus and Becker, Steffen and Schäfer, Wilhelm and Engels, Gregor},
  booktitle    = {Proceedings of the 16th International ACM SIGSOFT Symposium on Component-Based Software Engineering},
  pages        = {143--152},
  title        = {A Survey of Fuzzy Service Matching Approaches in the Context of {On-The-Fly} Computing},
  doi          = {10.1145/2465449.2465454},
  year         = {2013},
}

@inproceedings{562,
  abstract     = {In Distributed Cloud Computing, applications are deployed across many data centres at topologically diverse locations to improve network-related quality of service (QoS). As we focus on interactive applications, we minimize the latency between users and an application by allocating Cloud resources nearby the customers. Allocating resources at all locations will result in the best latency but also in the highest expenses. So we need to find an optimal subset of locations which reduces the latency but also the expenses – the facility location problem (FLP). In addition, we consider resource capacity restrictions, as a resource can only serve a limited amount of users. An FLP can be globally solved. Additionally, we propose a local, distributed heuristic. This heuristic is running within the network and does not depend on a global component. No distributed, local approximations for the capacitated FLP have been proposed so far due to the complexity of the problem. We compared the heuristic with an optimal solution obtained from a mixed integer program for different network topologies. We investigated the influence of different parameters like overall resource utilization or different latency weights.},
  author       = {Keller, Matthias and Pawlik, Stefan and Pietrzyk, Peter and Karl, Holger},
  booktitle    = {Proceedings of the 6th International Conference on Utility and Cloud Computing (UCC) workshop on Distributed cloud computing},
  pages        = {429--434},
  title        = {A Local Heuristic for Latency-Optimized Distributed {Cloud} Deployment},
  doi          = {10.1109/UCC.2013.85},
  year         = {2013},
}

@inproceedings{563,
  abstract     = {Dominating set based virtual backbones are used for routing in wireless ad-hoc networks. Such backbones receive and transmit messages from/to every node in the network. Existing distributed algorithms only consider undirected graphs, which model symmetric networks with uniform transmission ranges. We are particularly interested in the well-established disk graphs, which model asymmetric networks with non-uniform transmission ranges. The corresponding graph theoretic problem seeks a strongly connected dominating-absorbent set of minimum cardinality in a digraph. A subset of nodes in a digraph is a strongly connected dominating-absorbent set if the subgraph induced by these nodes is strongly connected and each node in the graph is either in the set or has both an in-neighbor and an out-neighbor in it. We introduce the first distributed algorithm for this problem in disk graphs. The algorithm gives an $O(k^4)$-approximation ratio and has a runtime bound of $O(Diam)$ where $Diam$ is the diameter of the graph and $k$ denotes the transmission ratio $r_{max}/r_{min}$ with $r_{max}$ and $r_{min}$ being the maximum and minimum transmission range, respectively. Moreover, we apply our algorithm on the subgraph of disk graphs consisting of only bidirectional edges. Our algorithm gives an $O(\ln k)$-approximation and a runtime bound of $O(k^8 \log^* n)$, which, for bounded $k$, is an optimal approximation for the problem, following Lenzen and Wattenhofer's $\Omega(\log^* n)$ runtime lower bound for distributed constant approximation in disk graphs.},
  author       = {Markarian, Christine and {Meyer auf der Heide}, Friedhelm and Schubert, Michael},
  booktitle    = {Proceedings of the 9th International Symposium on Algorithms and Experiments for Sensor Systems, Wireless Networks and Distributed Robotics (ALGOSENSORS)},
  pages        = {217--227},
  title        = {A Distributed Approximation Algorithm for Strongly Connected Dominating-Absorbent Sets in Asymmetric Wireless Ad-Hoc Networks},
  doi          = {10.1007/978-3-642-45346-5_16},
  year         = {2013},
}

