@phdthesis{478,
  abstract     = {{Software systems are playing an increasing role in our everyday life, and as the number of software applications grows, so does their complexity and the relevance of their computations. Software components can be found in many systems that are charged with safety-critical tasks, such as control systems for aviation or power plants. Hence, software verification techniques that are capable of proving the absence of critical errors are becoming more and more important in the field of software engineering. A well-established approach to software verification is model checking. Applying this technique involves an exhaustive exploration of a state space model corresponding to the system under consideration. The major challenge in model checking is the so-called state explosion problem: The state space of a software system grows exponentially with its size. Thus, the straightforward modelling of real-life systems is practically impossible. A common approach to this problem is the application of abstraction techniques, which reduce the original state space by mapping it onto a significantly smaller abstract one. Abstraction inherently involves a loss of information, and thus, the resulting abstract model may be too imprecise for a definite result in verification. Therefore, abstraction is typically combined with abstraction refinement: An initially very coarse abstract model is iteratively refined, i.e. enriched with new details about the original system, until a level of abstraction is reached that is precise enough for a definite outcome. Abstraction refinement-based model checking is fully automatable and is considered one of the most promising approaches to the state explosion problem in verification. However, it is still faced with a number of challenges. There exist several types of abstraction techniques, and not every type is equally well-suited for all kinds of systems and verification tasks. Moreover, the selection of adequate refinement steps is nontrivial and typically the most crucial part of the overall approach: Unfavourable refinement decisions can compromise the state space-reducing effect of abstraction and, as a consequence, can easily lead to the failure of verification. It is, however, hard to predict which refinement steps will eventually be expedient for verification and which will not.}},
  author       = {{Timm, Nils}},
  publisher    = {{Universität Paderborn}},
  title        = {{{Three-Valued Abstraction and Heuristic-Guided Refinement for Verifying Concurrent Systems}}},
  year         = {{2013}},
}

@inproceedings{481,
  abstract     = {{Cloud computing offers high availability, dynamic scalability, and elasticity while requiring only very little administration. However, this service comes with financial costs. Peer-to-peer systems, in contrast, operate at very low costs but cannot match the quality of service of the cloud. This paper focuses on the case study of Wikipedia and presents an approach to reduce the operational costs of hosting similar websites in the cloud by using a practical peer-to-peer approach. The visitors of the site join a Chord overlay, which acts as a first cache for article lookups. Simulation results show that up to 72% of the article lookups in Wikipedia could be answered by other visitors instead of using the cloud.}},
  author       = {{Graffi, Kalman and Bremer, Lars}},
  booktitle    = {{Proceedings of the International Conference on Communications (ICC'13)}},
  pages        = {{3444--3449}},
  title        = {{{Symbiotic Coupling of P2P and Cloud Systems: The Wikipedia Case}}},
  doi          = {{10.1109/ICC.2013.6655082}},
  year         = {{2013}},
}

@inproceedings{484,
  abstract     = {{One of the main ideas of Service-Oriented Computing (SOC) is the delivery of flexibly composable services provided on world-wide markets. For a successful service discovery, service requests have to be matched with the available service offers. However, in a situation in which no service that completely matches the request can be discovered, the customer may tolerate slight discrepancies between request and offer. Some existing fuzzy matching approaches are able to detect such service variants, but they do not allow one to explicitly specify which parts of a request are not mandatory. In this paper, we improve an existing service matching approach based on Visual Contracts, leveraging our preliminary work on design pattern detection. Thereby, we support explicit specifications of service variants and realize gradual matching results that can be ranked in order to discover the service offer that matches a customer’s request best.}},
  author       = {{Platenius, Marie Christin and von Detten, Markus and Gerth, Christian and Schäfer, Wilhelm and Engels, Gregor}},
  booktitle    = {{IEEE 20th International Conference on Web Services (ICWS 2013)}},
  pages        = {{613--614}},
  title        = {{{Service Matching under Consideration of Explicitly Specified Service Variants}}},
  doi          = {{10.1109/ICWS.2013.98}},
  year         = {{2013}},
}

@inproceedings{485,
  abstract     = {{Software composition has been studied as a subject of state-based planning for decades. Existing composition approaches that are efficient enough to be used in practice are limited to sequential arrangements of software components. This restriction dramatically reduces the number of composition problems that can be solved. However, there are many composition problems that could be solved by existing approaches if they were able to combine components in very simple non-sequential ways. To this end, we present an approach that arranges not only basic components but also composite components. Composite components enhance the structure of the composition with conditional control flows. Composite components are automatically generated by expert-written algorithms before the composition process starts. Therefore, our approach is not a substitute for existing composition algorithms but complements them with a preprocessing step. We verified the validity of our approach by implementing the presented algorithms.}},
  author       = {{Mohr, Felix and Kleine Büning, Hans}},
  booktitle    = {{Proceedings of the 15th International Conference on Information Integration and Web-based Applications \& Services (iiWAS)}},
  pages        = {{676--680}},
  title        = {{{Semi-Automated Software Composition Through Generated Components}}},
  doi          = {{10.1145/2539150.2539235}},
  year         = {{2013}},
}

@misc{486,
  author       = {{Otte, Oliver}},
  publisher    = {{Universität Paderborn}},
  title        = {{{Seitenkanalresistenz paarungsbasierter Kryptographie}}},
  year         = {{2013}},
}

@misc{487,
  author       = {{Bobolz, Jan}},
  publisher    = {{Universität Paderborn}},
  title        = {{{Security Proofs for Pairing-Based Cryptography in the Generic Group Model}}},
  year         = {{2013}},
}

@inproceedings{488,
  abstract     = {{Unattended systems are key ingredients of various critical infrastructures like networks of self-service terminals or automated teller machines. For cost and efficiency reasons they should mostly run autonomously. Unattended systems are attractive and lucrative targets for various kinds of attacks, including attacks on the integrity of their components and the communication between components. In this paper, we propose a general cryptographic framework to protect unattended systems. We also demonstrate that instantiating the framework with techniques from identity based cryptography is particularly well-suited to efficiently secure unattended systems.}},
  author       = {{Blömer, Johannes and Günther, Peter and Krummel, Volker}},
  booktitle    = {{Proceedings of the 5th International Conference on Mathematical Aspects of Computer and Information Sciences (MACIS)}},
  pages        = {{98--105}},
  title        = {{{Securing Critical Unattended Systems with Identity Based Cryptography - A Case Study}}},
  year         = {{2013}},
}

@misc{490,
  author       = {{Wallaschek, Felix}},
  publisher    = {{Universität Paderborn}},
  title        = {{{Routing in heterogenen OpenFlow Netzwerken}}},
  year         = {{2013}},
}

@misc{492,
  author       = {{Robbert, Christoph}},
  publisher    = {{Universität Paderborn}},
  title        = {{{Ressource-Optimized Deployment of Multi-Tier Applications - The Data Rate-Constrained Case}}},
  year         = {{2013}},
}

@phdthesis{494,
  abstract     = {{The maintenance of component-based software systems requires up-to-date models of their concrete architecture, i.e. the architecture that is realised in the source code. These models help in systematically planning, analysing and executing typical reengineering activities. Often no or only outdated architectural models of such systems exist. Therefore, various reverse engineering methods have been developed which try to recover a system's components, subsystems and connectors. However, these reverse engineering methods are severely impacted by design deficiencies in the system's code base, especially violations of the component encapsulation. As long as design deficiencies are not considered in the reverse engineering process, they reduce the quality of the recovered component structures. Despite this impact of design deficiencies, no existing architecture reconstruction approach explicitly integrates a systematic deficiency detection and removal into the recovery process. Therefore, I have developed Archimetrix. Archimetrix is a tool-supported architecture reconstruction process. It enhances a clustering-based architecture recovery approach with an extensible, pattern-based deficiency detection. After the detection of deficiencies, Archimetrix supports the software architect in removing the deficiencies and provides the means to preview the architectural consequences of such a removal. I also provide a process to identify and formalise additional deficiencies. I validated the approach on three case studies which show that Archimetrix is able to identify relevant deficiencies and that the removal of these deficiencies leads to an increased quality of the recovered architectures, i.e. they are closer to the corresponding conceptual architectures.}},
  author       = {{von Detten, Markus}},
  publisher    = {{Universität Paderborn}},
  title        = {{{Reengineering of Component-Based Software Systems in the Presence of Design Deficiencies}}},
  year         = {{2013}},
}

@inproceedings{495,
  abstract     = {{Automated service composition has been studied as a subject of state-based planning for a decade. A great deal of service composition tasks can only be solved if concrete output values of the services are considered in the composition process. However, the fact that those values are not known before runtime leads to nondeterministic planning problems, which have proven to be notoriously difficult in practical automated service composition applications. Even though this problem is frequently recognized, it has still received remarkably little attention and remains unsolved. This paper shows how nondeterminism in automated service composition can be reduced. We introduce context rules as a means to derive semantic knowledge from output values of services. These rules enable us to replace nondeterministic composition operations by less nondeterministic or even completely deterministic ones. We show the validity of our solutions not only theoretically but also evaluate them practically through an implementation.}},
  author       = {{Mohr, Felix and Lettmann, Theodor and Kleine Büning, Hans}},
  booktitle    = {{Proceedings of the 6th International Conference on Service Oriented Computing and Applications (SOCA)}},
  pages        = {{154--161}},
  title        = {{{Reducing Nondeterminism in Automated Service Composition}}},
  doi          = {{10.1109/SOCA.2013.25}},
  year         = {{2013}},
}

@inproceedings{496,
  abstract     = {{Within reactive topology control, a node determines its adjacent edges of a network subgraph without prior knowledge of its neighborhood. The goal is to construct a local view on a topology which provides certain desired properties such as planarity. During algorithm execution, a node, in general, is not allowed to determine all its neighbors of the network graph. There are well-known reactive algorithms for computing planar subgraphs. However, the subgraphs obtained do not have a constant Euclidean spanning ratio. This means that routing along these subgraphs may result in potentially long detours. So far, it has been unknown whether planar spanners can be constructed reactively. In this work, we show that, at least under the unit disk network model, this is indeed possible, by proposing an algorithm for the reactive construction of the partial Delaunay triangulation, which recently turned out to be a spanner. Furthermore, we show that our algorithm is message-optimal, as a node will only exchange messages with nodes that are also neighbors in the spanner. The algorithm’s presentation is complemented by a rigorous proof of correctness.}},
  author       = {{Benter, Markus and Neumann, Florentin and Frey, Hannes}},
  booktitle    = {{Proceedings of the 32nd IEEE International Conference on Computer Communications (INFOCOM)}},
  pages        = {{2193--2201}},
  title        = {{{Reactive Planar Spanner Construction in Wireless Ad Hoc and Sensor Networks}}},
  doi          = {{10.1109/INFCOM.2013.6567022}},
  year         = {{2013}},
}

@inproceedings{498,
  abstract     = {{Proof-carrying code approaches aim at safe execution of untrusted code by having the code producer attach a safety proof to the code, which the code consumer only has to validate. Depending on the type of safety property, proofs can however become quite large, and their validation - though faster than their construction - is still time-consuming. In this paper we introduce a new concept for safe execution of untrusted code. It keeps the idea of putting the time-consuming part of proving on the side of the code producer; however, it attaches no proofs to the code anymore but instead uses the proof to transform the program into an equivalent but more efficiently verifiable program. Code consumers thus still do proving themselves, however, on a computationally inexpensive level only. Experimental results show that the proof effort can be reduced by several orders of magnitude, both with respect to time and space.}},
  author       = {{Wonisch, Daniel and Schremmer, Alexander and Wehrheim, Heike}},
  booktitle    = {{Proceedings of the 25th International Conference on Computer Aided Verification (CAV)}},
  pages        = {{912--927}},
  title        = {{{Programs from Proofs – A PCC Alternative}}},
  doi          = {{10.1007/978-3-642-39799-8_65}},
  year         = {{2013}},
}

@inproceedings{499,
  abstract     = {{We present a new online algorithm for profit-oriented scheduling on multiple speed-scalable processors. Moreover, we provide a tight analysis of the algorithm's competitiveness. Our results generalize and improve upon work by Chan et al. (2010), which considers a single speed-scalable processor. Using significantly different techniques, we can not only extend their model to multiprocessors but also prove an enhanced and tight competitive ratio for our algorithm. In our scheduling problem, jobs arrive over time and are preemptable. They have different workloads, values, and deadlines. The scheduler may decide not to finish a job but instead to suffer a loss equaling the job's value. However, to process a job's workload until its deadline the scheduler must invest a certain amount of energy. The cost of a schedule is the sum of lost values and invested energy. In order to finish a job the scheduler has to determine which processors to use and set their speeds accordingly. A processor's energy consumption is its power $P(s)$ integrated over time, where $P(s)=s^{\alpha}$ is the power consumption when running at speed $s$. Since we consider the online variant of the problem, the scheduler has no knowledge about future jobs. This problem was introduced by Chan et al. (2010) for the case of a single processor. They presented an online algorithm which is $\alpha^{\alpha}+2e\alpha$-competitive. We provide an online algorithm for the case of multiple processors with an improved competitive ratio of $\alpha^{\alpha}$.}},
  author       = {{Kling, Peter and Pietrzyk, Peter}},
  booktitle    = {{Proceedings of the 25th ACM Symposium on Parallelism in Algorithms and Architectures (SPAA)}},
  pages        = {{251--260}},
  title        = {{{Profitable Scheduling on Multiple Speed-Scalable Processors}}},
  doi          = {{10.1145/2486159.2486183}},
  year         = {{2013}},
}

@article{7271,
  author       = {{de Lemos, Rogério and Giese, Holger and Müller, Hausi A. and Shaw, Mary and Andersson, Jesper and Litoiu, Marin and Schmerl, Bradley and Tamura, Gabriel and Villegas, Norha M. and Vogel, Thomas and Weyns, Danny and Baresi, Luciano and Becker, Basil and Bencomo, Nelly and Brun, Yuriy and Cukic, Bojan and Desmarais, Ron and Dustdar, Schahram and Engels, Gregor and Geihs, Kurt and Göschka, Karl M. and Gorla, Alessandra and Grassi, Vincenzo and Inverardi, Paola and Karsai, Gabor and Kramer, Jeff and Lopes, Antonia and Magee, Jeff and Malek, Sam and Mankovskii, Serge and Mirandola, Raffaela and Mylopoulos, John and Nierstrasz, Oscar and Pezzè, Mauro and Prehofer, Christian and Schäfer, Wilhelm and Schlichting, Rick and Smith, Dennis B. and Sousa, João Pedro and Tahvildari, Ladan and Wong, Kenny and Wuttke, Jochen}},
  journal      = {{Software Engineering for Self-Adaptive Systems II}},
  pages        = {{1--32}},
  title        = {{{Software Engineering for Self-Adaptive Systems: A Second Research Roadmap}}},
  doi          = {{10.1007/978-3-642-35813-5_1}},
  year         = {{2013}},
}

@article{7272,
  author       = {{Bouillon, Elke and Güldali, Baris and Herrmann, Andrea and Keuler, Thorsten and Moldt, Daniel and Riebisch, Matthias}},
  journal      = {{Softwaretechnik-Trends}},
  number       = {{1}},
  pages        = {{29--30}},
  title        = {{{Leichtgewichtige Traceability im agilen Entwicklungsprozess am Beispiel von Scrum}}},
  volume       = {{33}},
  year         = {{2013}},
}

@article{7273,
  author       = {{Grieger, Marvin and Güldali, Baris and Sauer, Stefan and Mlynarski, Michael}},
  journal      = {{OBJEKTspektrum (Online Themenspecials)}},
  pages        = {{1--4}},
  title        = {{{Testen bei Migrationsprojekten}}},
  year         = {{2013}},
}

@article{7274,
  author       = {{Fazal-Baqaie, Masud and Güldali, Baris and Luckey, Markus and Sauer, Stefan and Spijkerman, Michael}},
  journal      = {{OBJEKTspektrum (Online Themenspecials)}},
  number       = {{RE/2013}},
  pages        = {{1--5}},
  title        = {{{Maßgeschneidert und werkzeugunterstützt Entwickeln angepasster Requirements Engineering-Methoden}}},
  year         = {{2013}},
}

@article{7275,
  author       = {{Engels, Gregor and Luckey, Markus}},
  journal      = {{Computer Science - Research and Development}},
  number       = {{1}},
  pages        = {{1--2}},
  title        = {{{Editorial}}},
  volume       = {{28}},
  year         = {{2013}},
}

@article{7276,
  author       = {{Engels, Gregor and Gerth, Christian and Kleinjohann, Bernd and Kleinjohann, Lisa and Müller, Wolfgang and Sauer, Stefan}},
  journal      = {{Forschungsforum Paderborn}},
  pages        = {{54--61}},
  title        = {{{Informationstechnik spart Ressourcen}}},
  volume       = {{16/2013}},
  year         = {{2013}},
}

