@inproceedings{508,
  abstract     = {{The process of planning a virtual topology for a Wavelength Division Multiplexing (WDM) network is called Virtual Topology Design (VTD). The goal of VTD is to find a virtual topology that supports forwarding the expected traffic without congestion. In networks with fluctuating, high traffic demands, it can happen that no single topology fits all changing traffic demands occurring over a longer time. Thus, during operation, the virtual topology has to be reconfigured. Since modern networks tend to be large, VTD algorithms have to scale well with increasing network size, requiring distributed algorithms. Existing distributed VTD algorithms, however, react too slowly on congestion for the real-time reconfiguration of large networks. We propose Selfish Virtual Topology Reconfiguration (SVTR) as a new algorithm for distributed VTD. It combines reconfiguring the virtual topology and routing through a Software Defined Network (SDN). SVTR is used for online, on-the-fly network reconfiguration. Its integrated routing and WDM reconfiguration keeps connection disruption due to network reconfiguration to a minimum and is able to react very quickly to traffic pattern changes. SVTR works by iteratively adapting the virtual topology to the observed traffic patterns without global traffic information and without future traffic estimations. We evaluated SVTR by simulation and found that it significantly lowers congestion in realistic networks and high load scenarios.}},
  author       = {{Wette, Philip and Karl, Holger}},
  booktitle    = {{Proceedings of the 19th IEEE International Workshop on Local and Metropolitan Area Networks (IEEE LANMAN)}},
  pages        = {{1--6}},
  title        = {{{On the Quality of Selfish Virtual Topology Reconfiguration in IP-over-WDM Networks}}},
  doi          = {{10.1109/LANMAN.2013.6528271}},
  year         = {{2013}},
}

@inproceedings{509,
  abstract     = {{In this paper we will introduce a new d-dimensional graph for constructing geometric application layer overlay networks. Our approach will use internet coordinates, embedded using the L∞-metric. After describing the graph structure, we will show how it limits maintenance overhead by bounding each node’s out-degree and how it supports greedy routing using one-hop neighbourhood information in each routing step. We will further show that greedy routing can always compute a path in our graph and we will also prove that in each forwarding step the next hop is closer to the destination than the current node.}},
  author       = {{Autenrieth, Marcus and Frey, Hannes}},
  booktitle    = {{Proceedings of the Conference on Networked Systems (NetSys)}},
  pages        = {{126--131}},
  title        = {{{On Greedy Routing in Degree-bounded Graphs over d-Dimensional Internet Coordinate Embeddings}}},
  doi          = {{10.1109/NetSys.2013.10}},
  year         = {{2013}},
}

@misc{511,
  author       = {{Splietker, Malte}},
  title        = {{{MapReduce in Software Defined Networks}}},
  publisher    = {{Universität Paderborn}},
  year         = {{2013}},
}

@inproceedings{513,
  author       = {{Avin, Chen and Häupler, Bernhard and Lotker, Zvi and Scheideler, Christian and Schmid, Stefan}},
  title        = {{{Locally Self-Adjusting Tree Networks}}},
  booktitle    = {{Proceedings of the 27th IEEE International Parallel and Distributed Processing Symposium (IPDPS)}},
  pages        = {{395--406}},
  doi          = {{10.1109/IPDPS.2013.40}},
  year         = {{2013}},
  abstract     = {{This paper initiates the study of self-adjusting networks (or distributed data structures) whose topologies dynamically adapt to a communication pattern $\sigma$. We present a fully decentralized self-adjusting solution called SplayNet. A SplayNet is a distributed generalization of the classic splay tree concept. It ensures short paths (which can be found using local-greedy routing) between communication partners while minimizing topological rearrangements. We derive an upper bound for the amortized communication cost of a SplayNet based on empirical entropies of $\sigma$, and show that SplayNets have several interesting convergence properties. For instance, SplayNets features a provable online optimality under special requests scenarios. We also investigate the optimal static network and prove different lower bounds for the average communication cost based on graph cuts and on the empirical entropy of the communication pattern $\sigma$. From these lower bounds it follows, e.g., that SplayNets are optimal in scenarios where the requests follow a product distribution as well. Finally, this paper shows that in contrast to the Minimum Linear Arrangement problem which is generally NP-hard, the optimal static tree network can be computed in polynomial time for any guest graph, despite the exponentially large graph family. We complement our formal analysis with a small simulation study on a Facebook graph.}},
}

@phdthesis{514,
  abstract     = {{Diese Arbeit besch{\"a}ftigt sich mit dem Facility Location Problem. Dies ist ein Optimierungsproblem, bei dem festgelegt werden muss an welchen Positionen Ressourcen zur Verf{\"u}gung gestellt werden, so dass diese von Nutzern gut erreicht werden k{\"o}nnen. Es sollen dabei Kosten minimiert werden, die zum einen durch Bereitstellung von Ressourcen und zum anderen durch Verbindungskosten zwischen Nutzern und Ressourcen entstehen. Die Schwierigkeit des Problems liegt darin, dass man einerseits m{\"o}glichst wenige Ressourcen zur Verf{\"u}gung stellen m{\"o}chte, andererseits daf{\"u}r sorgen muss, dass sich Nutzer nicht allzu weit weg von Ressourcen befinden. Dies w{\"u}rde n{\"a}mlich hohe Verbindungskosten nach sich ziehen. Das Facility Location Problem wurde bereits sehr intensiv in vielen unterschiedlichen Varianten untersucht. In dieser Arbeit werden drei Varianten des Problems modelliert und neue Algorithmen f{\"u}r sie entwickelt und bez{\"u}glich ihres Approximationsfaktors und ihrer Laufzeit analysiert. Jede dieser drei untersuchten Varianten hat einen besonderen Schwerpunkt. Bei der ersten Varianten handelt es sich um ein Online Problem, da hier die Eingabe nicht von Anfang an bekannt ist, sondern Schritt f{\"u}r Schritt enth{\"u}llt wird. Die Schwierigkeit hierbei besteht darin unwiderrufliche Entscheidungen treffen zu m{\"u}ssen ohne dabei die Zukunft zu kennen und trotzdem eine zu jeder Zeit gute L{\"o}sung angeben zu k{\"o}nnen. Der Schwerpunkt der zweiten Variante liegt auf Lokalit{\"a}t, die z.B. in Sensornetzwerken von großer Bedeutung ist. Hier soll eine L{\"o}sung verteilt und nur mit Hilfe von lokalen Information berechnet werden. Schließlich besch{\"a}ftigt sich die dritte Variante mit einer verteilten Berechnung, bei welcher nur eine stark beschr{\"a}nkte Datenmenge verschickt werden darf und dabei trotzdem ein sehr guter Approximationsfaktor erreicht werden muss. Die bei der Analyse der Approximationsfaktoren bzw. der Kompetitivit{\"a}t verwendeten Techniken basieren zum großen Teil auf Absch{\"a}tzung der primalen L{\"o}sung mit Hilfe einer L{\"o}sung des zugeh{\"o}rigen dualen Problems. F{\"u}r die Modellierung von Lokalit{\"a}t wird das weitverbreitete LOCAL Modell verwendet. In diesem Modell werden f{\"u}r die Algorithmen subpolynomielle obere Laufzeitschranken gezeigt.}},
  author       = {{Pietrzyk, Peter}},
  school       = {{Universität Paderborn}},
  title        = {{{Local and Online Algorithms for Facility Location}}},
  year         = {{2013}},
}

@inproceedings{517,
  abstract     = {{In the Semantic (Web) Services area, services are considered black boxes with a semantic description of their interfaces as to allow for precise service selection and configuration. The semantic description is usually grounded on domain-specific concepts as modeled in ontologies. This accounts for types used in service signatures, but also predicates occurring in preconditions and effects of services. Ontologies, in particular those enhanced with rules, capture the knowledge of domain experts on properties of and relations between domain concepts. In this paper, we present a verification technique for service compositions which makes use of this domain knowledge. We consider a service composition to be an assembly of services of which we just know signatures, preconditions, and effects. We aim at proving that a composition satisfies a (user-defined) requirement, specified in terms of guaranteed preconditions and required postconditions. As an underlying verification engine we use an SMT solver. To take advantage of the domain knowledge (and often, to enable verification at all), the knowledge is fed into the solver in the form of sorts, uninterpreted functions and in particular assertions as to enhance the solver’s reasoning capabilities. Thereby, we allow for deductions within a domain previously unknown to the solver. We exemplify our technique on a case study from the area of water network optimization software.}},
  author       = {{Walther, Sven and Wehrheim, Heike}},
  booktitle    = {{Proceedings of the 18th IEEE International Conference on Engineering of Complex Computer Systems (ICECCS)}},
  pages        = {{24--32}},
  title        = {{{Knowledge-Based Verification of Service Compositions - An SMT approach}}},
  doi          = {{10.1109/ICECCS.2013.14}},
  year         = {{2013}},
}

@inproceedings{519,
  abstract     = {{In this work we present the first scalable distributed information system, i.e., a system with low storage overhead, that is provably robust against Denial-of-Service (DoS) attacks by a current insider. We allow a current insider to have complete knowledge about the information system and to have the power to block any $\epsilon$-fraction of its servers by a DoS-attack, where $\epsilon$ can be chosen up to a constant. The task of the system is to serve any collection of lookup requests with at most one per non-blocked server in an efficient way despite this attack. Previously, scalable solutions were only known for DoS-attacks of past insiders, where a past insider only has complete knowledge about some past time point $t_0$ of the information system. Scheideler et al. (DISC 2007, SPAA 2009) showed that in this case it is possible to design an information system so that any information that was inserted or last updated after $t_0$ is safe against a DoS-attack. But their constructions would not work at all for a current insider. The key idea behind our IRIS system is to make extensive use of coding. More precisely, we present two alternative distributed coding strategies with an at most logarithmic storage overhead that can handle up to a constant fraction of blocked servers.}},
  author       = {{Eikel, Martina and Scheideler, Christian}},
  booktitle    = {{Proceedings of the 25th ACM Symposium on Parallelism in Algorithms and Architectures (SPAA)}},
  pages        = {{119--129}},
  title        = {{{IRIS: A Robust Information System Against Insider DoS-Attacks}}},
  doi          = {{10.1145/2486159.2486186}},
  year         = {{2013}},
}

@inproceedings{520,
  abstract     = {{Preemptive Routing and Wavelength Assignment (RWA) algorithms preempt established lightpaths in case not enough resources are available to set up a new lightpath in a Wavelength Division Multiplexing (WDM) network. The selection of lightpaths to be preempted relies on internal decisions of the RWA algorithm. Thus, if dedicated properties of the network topology are required by the applications running on the network, these requirements have to be known to the RWA algorithm. Otherwise it might happen that by preempting a particular lightpath these requirements are violated. If, however, these requirements include parameters known only at the nodes running the application, the RWA algorithm cannot evaluate the requirements. For this reason an RWA algorithm is needed which incorporates feedback from the application layer in the preemption decisions. This work proposes a simple interface along with an algorithm for computing and selecting preemption candidates in case a lightpath cannot be established. We reason about the necessity of using information from the application layer in the RWA and present two example applications which benefit from this idea.}},
  author       = {{Wette, Philip and Karl, Holger}},
  booktitle    = {{Proceedings of the 32nd IEEE International Conference on Computer Communications (INFOCOM)}},
  pages        = {{51--52}},
  title        = {{{Incorporating feedback from application layer into routing and wavelength assignment algorithms}}},
  doi          = {{10.1109/INFCOMW.2013.6970733}},
  year         = {{2013}},
}

@misc{521,
  author       = {{Riebler, Heinrich}},
  title        = {{{Identifikation und Wiederherstellung von kryptographischen Schlüsseln mit FPGAs}}},
  publisher    = {{Universität Paderborn}},
  year         = {{2013}},
  keywords     = {{coldboot}},
}

@misc{522,
  author       = {{Feldotto, Matthias}},
  title        = {{{HSkip+: A Self-Stabilizing Overlay Network for Nodes with Heterogeneous Bandwidths}}},
  publisher    = {{Universität Paderborn}},
  year         = {{2013}},
}

@unpublished{524,
  abstract     = {{We study the complexity theory for the local distributed setting introduced by Korman, Peleg and Fraigniaud. They have defined three complexity classes LD (Local Decision), NLD (Nondeterministic Local Decision) and NLD$^{\#n}$. The class LD consists of all languages which can be decided with a constant number of communication rounds. The class NLD consists of all languages which can be verified by a nondeterministic algorithm with a constant number of communication rounds. In order to define the nondeterministic classes, they have transferred the notation of nondeterminism into the distributed setting by the use of certificates and verifiers. The class NLD$^{\#n}$ consists of all languages which can be verified by a nondeterministic algorithm where each node has access to an oracle for the number of nodes. They have shown the hierarchy LD subset NLD subset NLD$^{\#n}$. Our main contributions are strict hierarchies within the classes defined by Korman, Peleg and Fraigniaud. We define additional complexity classes: the class LD(t) consists of all languages which can be decided with at most t communication rounds. The class NLD-O(f) consists of all languages which can be verified by a local verifier such that the size of the certificates that are needed to verify the language are bounded by a function from O(f). Our main results are refined strict hierarchies within these nondeterministic classes.}},
  author       = {{Meyer auf der Heide, Friedhelm and Swirkot, Kamil}},
  note         = {{arXiv preprint}},
  publisher    = {{arXiv}},
  title        = {{{Hierarchies in Local Distributed Decision}}},
  year         = {{2013}},
}

@misc{525,
  author       = {{Vinkemeier, Tim Niklas}},
  publisher    = {{Universität Paderborn}},
  title        = {{{Haptics - Hadoop performance testing in concurrent job scenarios}}},
  year         = {{2013}},
}

@inproceedings{527,
  abstract     = {{In the future vision of software engineering, services from world-wide markets are composed automated in order to build custom-made systems. Supporting such scenarios requires an adequate service matching approach. Many existing approaches do not fulfill two key requirements of emerging concepts like On-The-Fly-Computing, namely (1) comprehensiveness, i.e., the consideration of different service aspects that cover not only functional properties, but also non-functional properties and (2) fuzzy matching, i.e., the ability to deliver gradual results in order to cope with a certain extent of uncertainty, incompleteness, and tolerance ranges. In this paper, I present a fuzzy matching process that distinguishes between different fuzziness sources and leverages fuzziness in different matching steps which consider different service aspects, e.g., behavior and quality properties.}},
  author       = {{Platenius, Marie Christin}},
  booktitle    = {{Proceedings of the Doctoral Symposium of the 9th joint meeting of the European Software Engineering Conference (ESEC) and the ACM SIGSOFT Symposium on the Foundations of Software Engineering (FSE)}},
  pages        = {{715--718}},
  title        = {{{Fuzzy Service Matching in On-The-Fly Computing}}},
  doi          = {{10.1145/2491411.2492405}},
  year         = {{2013}},
}

@misc{534,
  author       = {{Satya, Suhas}},
  publisher    = {{Universität Paderborn}},
  title        = {{{Emulating Wavelength Division Multiplexing using OpenFlow}}},
  year         = {{2013}},
}

@unpublished{538,
  abstract     = {{We present a new technique to realize attribute-based encryption (ABE) schemes secure in the standard model against chosen-ciphertext attacks (CCA-secure). Our approach is to extend certain concrete chosen-plaintext secure (CPA-secure) ABE schemes to achieve more efficient constructions than the known generic constructions of CCA-secure ABE schemes. We restrict ourselves to the construction of attribute-based key encapsulation mechanisms (KEMs) and present two concrete CCA-secure schemes: a key-policy attribute-based KEM that is based on Goyal's key-policy ABE and a ciphertext-policy attribute-based KEM that is based on Waters' ciphertext-policy ABE. To achieve our goals, we use an appropriate hash function and need to extend the public parameters and the ciphertexts of the underlying CPA-secure encryption schemes only by a single group element. Moreover, we use the same hardness assumptions as the underlying CPA-secure encryption schemes.}},
  author       = {{Blömer, Johannes and Liske, Gennadij}},
  note         = {{Preprint}},
  publisher    = {{Universität Paderborn}},
  title        = {{{Direct Chosen-Ciphertext Secure Attribute-Based Key Encapsulations without Random Oracles}}},
  year         = {{2013}},
}

@inproceedings{541,
  author       = {{Graffi, Kalman and Rapp, Vitaly}},
  title        = {{{Continuous Gossip-based Aggregation through Dynamic Information Aging}}},
  booktitle    = {{Proceedings of the International Conference on Computer Communications and Networks (ICCCN'13)}},
  pages        = {{1--7}},
  doi          = {{10.1109/ICCCN.2013.6614118}},
  year         = {{2013}},
  abstract     = {{Existing solutions for gossip-based aggregation in peer-to-peer networks use epochs to calculate a global estimation from an initial static set of local values. Once the estimation converges system-wide, a new epoch is started with fresh initial values. Long epochs result in precise estimations based on old measurements and short epochs result in imprecise aggregated estimations. In contrast to this approach, we present in this paper a continuous, epoch-less approach which considers fresh local values in every round of the gossip-based aggregation. By using an approach for dynamic information aging, inaccurate values and values from left peers fade from the aggregation memory. Evaluation shows that the presented approach for continuous information aggregation in peer-to-peer systems monitors the system performance precisely, adapts to changes and is lightweight to operate.}},
}

@inproceedings{542,
  abstract     = {{We consider the problem of managing a dynamic heterogeneous storage system in a distributed way so that the amount of data assigned to a host in that system is related to its capacity. Two central problems have to be solved for this: (1) organizing the hosts in an overlay network with low degree and diameter so that one can efficiently check the correct distribution of the data and route between any two hosts, and (2) distributing the data among the hosts so that the distribution respects the capacities of the hosts and can easily be adapted as the set of hosts or their capacities change. We present distributed protocols for these problems that are self-stabilizing and that do not need any global knowledge about the system such as the number of nodes or the overall capacity of the system. Prior to this work no solution was known satisfying these properties.}},
  author       = {{Kniesburges, Sebastian and Koutsopoulos, Andreas and Scheideler, Christian}},
  booktitle    = {{Proceedings of the 27th International Symposium on Distributed Computing (DISC)}},
  pages        = {{537--549}},
  title        = {{{CONE-DHT: A distributed self-stabilizing algorithm for a heterogeneous storage system}}},
  doi          = {{10.1007/978-3-642-41527-2_37}},
  year         = {{2013}},
}

@inproceedings{544,
  author       = {{Feldotto, Matthias and Graffi, Kalman}},
  title        = {{{Comparative Evaluation of Peer-to-Peer Systems Using PeerfactSim.KOM}}},
  booktitle    = {{Proceedings of the International Conference on High Performance Computing and Simulation (HPCS'13)}},
  pages        = {{99--106}},
  doi          = {{10.1109/HPCSim.2013.6641399}},
  year         = {{2013}},
  abstract     = {{Comparative evaluations of peer-to-peer protocols through simulations are a viable approach to judge the performance and costs of the individual protocols in large-scale networks. In order to support this work, we enhanced the peer-to-peer systems simulator PeerfactSim.KOM with a fine-grained analyzer concept, with exhaustive automated measurements and gnuplot generators as well as a coordination control to evaluate a set of experiment setups in parallel. Thus, by configuring all experiments and protocols only once and starting the simulator, all desired measurements are performed, analyzed, evaluated and combined, resulting in a holistic environment for the comparative evaluation of peer-to-peer systems.}},
}

@inproceedings{546,
  author       = {{Graffi, Kalman and Benter, Markus and Divband, Mohammad and Kniesburges, Sebastian and Koutsopoulos, Andreas}},
  title        = {{{Ca-Re-Chord: A Churn Resistant Self-stabilizing Chord Overlay Network}}},
  booktitle    = {{Proceedings of the Conference on Networked Systems (NetSys)}},
  pages        = {{27--34}},
  doi          = {{10.1109/NetSys.2013.11}},
  year         = {{2013}},
  abstract     = {{Self-stabilization is the property of a system to transfer itself regardless of the initial state into a legitimate state. Chord as a simple, decentralized and scalable distributed hash table is an ideal showcase to introduce self-stabilization for p2p overlays. In this paper, we present Re-Chord, a self-stabilizing version of Chord. We show, that the stabilization process is functional, but prone to strong churn. For that, we present Ca-Re-Chord, a churn resistant version of Re-Chord, that allows the creation of a useful DHT in any kind of graph regardless of the initial state. Simulation results attest the churn resistance and good performance of Ca-Re-Chord.}},
}

@phdthesis{547,
  abstract     = {{In recent years, the role of process models in the development of enterprise software systems has increased continuously. Today, process models are used at different levels in the development process. For instance, in Service-Oriented Architectures (SOA), high-level business process models become input for the development of IT systems, and in running IT systems executable process models describe choreographies of Web Services. A key driver behind this development is the necessity for a closer alignment of business and IT requirements, to reduce the reaction times in software development to frequent changes in competitive markets. Typically in these scenarios, process models are developed, maintained, and transformed in a team environment by several stakeholders that are often from different business units, resulting in different versions. To obtain integrated process models comprising the changes applied to different versions, the versions need to be consolidated by means of model change management. Change management for process models can be compared to widely used concurrent versioning systems (CVS) and consists of the following major activities: matching of process models, detection of differences, computation of dependencies and conflicts between differences, and merging of process models. Although in general model-driven development (MDD) is accepted as a well-established development approach, there are still some shortcomings that let developers decide against MDD and for more traditional development paradigms. These shortcomings comprise a lack of fully integrated and fully featured development environments for MDD, such as a comprehensive support for model change management. In this thesis, we present a framework for process model change management. The framework is based on an intermediate representation for process models that serves as an abstraction of specific process modeling languages and focuses on common syntactic and semantic core concepts for the modeling of workflow in process models. Based on the intermediate representation, we match process models in versioning scenarios and compute differences between process models generically. Further, we consider the analysis of dependencies between differences and show how conflicts between differences can be computed by taking into account the semantics of the modeling language. As proof-of-concept, we have implemented major parts of this framework in terms of a prototype. The detection of differences and dependencies contributed also to the Compare & Merge framework for the IBM WebSphere Business Modeler V 7.0 [1] (WBM), which was released as a product in fall 2009.}},
  author       = {{Gerth, Christian}},
  school       = {{Universität Paderborn}},
  title        = {{{Business Process Models - Change Management}}},
  doi          = {{10.1007/978-3-642-38604-6}},
  year         = {{2013}},
}

