@misc{486,
  author       = {{Otte, Oliver}},
  publisher    = {{Universität Paderborn}},
  title        = {{{Seitenkanalresistenz paarungsbasierter Kryptographie}}},
  year         = {{2013}},
}

@misc{487,
  author       = {{Bobolz, Jan}},
  publisher    = {{Universität Paderborn}},
  title        = {{{Security Proofs for Pairing-Based Cryptography in the Generic Group Model}}},
  year         = {{2013}},
}

@inproceedings{488,
  abstract     = {{Unattended systems are key ingredients of various critical infrastructures like networks of self service terminals or automated teller machines. For cost and efficiency reasons they should mostly run autonomously. Unattended systems are attractive and lucrative targets for various kinds of attacks, including attacks on the integrity of their components and the communication between components. In this paper, we propose a general cryptographic framework to protect unattended systems. We also demonstrate that instantiating the framework with techniques from identity based cryptography is particularly well-suited to efficiently secure unattended systems.}},
  author       = {{Blömer, Johannes and Günther, Peter and Krummel, Volker}},
  booktitle    = {{Proceedings of the 5th International Conference on Mathematical Aspects of Computer and Information Sciences (MACIS)}},
  pages        = {{98--105}},
  title        = {{{Securing Critical Unattended Systems with Identity Based Cryptography - A Case Study}}},
  year         = {{2013}},
}

@misc{490,
  author       = {{Wallaschek, Felix}},
  publisher    = {{Universität Paderborn}},
  title        = {{{Routing in heterogenen OpenFlow Netzwerken}}},
  year         = {{2013}},
}

@misc{492,
  author       = {{Robbert, Christoph}},
  publisher    = {{Universität Paderborn}},
  title        = {{{Ressource-Optimized Deployment of Multi-Tier Applications - The Data Rate-Constrained Case}}},
  year         = {{2013}},
}

@phdthesis{494,
  abstract     = {{The maintenance of component-based software systems requires up-to-date models of their concrete architecture, i.e. the architecture that is realised in the source code. These models help in systematically planning, analysing and executing typical reengineering activities. Often no or only outdated architectural models of such systems exist. Therefore, various reverse engineering methods have been developed which try to recover a system's components, subsystems and connectors. However, these reverse engineering methods are severely impacted by design deficiencies in the system's code base, especially violations of the component encapsulation. As long as design deficiencies are not considered in the reverse engineering process, they reduce the quality of the recovered component structures. Despite this impact of design deficiencies, no existing architecture reconstruction approach explicitly integrates a systematic deficiency detection and removal into the recovery process. Therefore, I have developed Archimetrix. Archimetrix is a tool-supported architecture reconstruction process. It enhances a clustering-based architecture recovery approach with an extensible, pattern-based deficiency detection. After the detection of deficiencies, Archimetrix supports the software architect in removing the deficiencies and provides the means to preview the architectural consequences of such a removal. I also provide a process to identify and formalise additional deficiencies. I validated the approach on three case studies which show that Archimetrix is able to identify relevant deficiencies and that the removal of these deficiencies leads to an increased quality of the recovered architectures, i.e. they are closer to the corresponding conceptual architectures.}},
  author       = {{von Detten, Markus}},
  publisher    = {{Universität Paderborn}},
  title        = {{{Reengineering of Component-Based Software Systems in the Presence of Design Deficiencies}}},
  year         = {{2013}},
}

@inproceedings{495,
  abstract     = {{Automated service composition has been studied as a subject of state based planning for a decade. A great deal of service composition tasks can only be solved if concrete output values of the services are considered in the composition process. However, the fact that those values are not known before runtime leads to nondeterministic planning problems, which have proven to be notoriously difficult in practical automated service composition applications. Even though this problem is frequently recognized, it has still received remarkably little attention and remains unsolved. This paper shows how nondeterminism in automated service composition can be reduced. We introduce context rules as a means to derive semantic knowledge from output values of services. These rules enable us to replace nondeterministic composition operations by less nondeterministic or even completely deterministic ones. We show the validity of our solutions not only theoretically but also have evaluated them practically through implementation.}},
  author       = {{Mohr, Felix and Lettmann, Theodor and Kleine Büning, Hans}},
  booktitle    = {{Proceedings of the 6th International Conference on Service Oriented Computing and Applications (SOCA)}},
  pages        = {{154--161}},
  title        = {{{Reducing Nondeterminism in Automated Service Composition}}},
  doi          = {{10.1109/SOCA.2013.25}},
  year         = {{2013}},
}

@inproceedings{496,
  abstract     = {{Within reactive topology control, a node determines its adjacent edges of a network subgraph without prior knowledge of its neighborhood. The goal is to construct a local view on a topology which provides certain desired properties such as planarity. During algorithm execution, a node, in general, is not allowed to determine all its neighbors of the network graph. There are well-known reactive algorithms for computing planar subgraphs. However, the subgraphs obtained do not have constant Euclidean spanning ratio. This means that routing along these subgraphs may result in potentially long detours. So far, it has been unknown if planar spanners can be constructed reactively. In this work, we show that at least under the unit disk network model, this is indeed possible, by proposing an algorithm for reactive construction of the partial Delaunay triangulation, which recently turned out to be a spanner. Furthermore, we show that our algorithm is message-optimal as a node will only exchange messages with nodes that are also neighbors in the spanner. The algorithm’s presentation is complemented by a rigorous proof of correctness.}},
  author       = {{Benter, Markus and Neumann, Florentin and Frey, Hannes}},
  booktitle    = {{Proceedings of the 32nd IEEE International Conference on Computer Communications (INFOCOM)}},
  pages        = {{2193--2201}},
  title        = {{{Reactive Planar Spanner Construction in Wireless Ad Hoc and Sensor Networks}}},
  doi          = {{10.1109/INFCOM.2013.6567022}},
  year         = {{2013}},
}

@inproceedings{498,
  abstract     = {{Proof-carrying code approaches aim at safe execution of untrusted code by having the code producer attach a safety proof to the code which the code consumer only has to validate. Depending on the type of safety property, proofs can however become quite large and their validation - though faster than their construction - still time consuming. In this paper we introduce a new concept for safe execution of untrusted code. It keeps the idea of putting the time consuming part of proving on the side of the code producer, however, attaches no proofs to code anymore but instead uses the proof to transform the program into an equivalent but more efficiently verifiable program. Code consumers thus still do proving themselves, however, on a computationally inexpensive level only. Experimental results show that the proof effort can be reduced by several orders of magnitude, both with respect to time and space.}},
  author       = {{Wonisch, Daniel and Schremmer, Alexander and Wehrheim, Heike}},
  booktitle    = {{Proceedings of the 25th International Conference on Computer Aided Verification (CAV)}},
  pages        = {{912--927}},
  title        = {{{Programs from Proofs – A PCC Alternative}}},
  doi          = {{10.1007/978-3-642-39799-8_65}},
  year         = {{2013}},
}

@inproceedings{499,
  abstract     = {{We present a new online algorithm for profit-oriented scheduling on multiple speed-scalable processors. Moreover, we provide a tight analysis of the algorithm's competitiveness. Our results generalize and improve upon work by \citet{Chan:2010}, which considers a single speed-scalable processor. Using significantly different techniques, we can not only extend their model to multiprocessors but also prove an enhanced and tight competitive ratio for our algorithm. In our scheduling problem, jobs arrive over time and are preemptable. They have different workloads, values, and deadlines. The scheduler may decide not to finish a job but instead to suffer a loss equaling the job's value. However, to process a job's workload until its deadline the scheduler must invest a certain amount of energy. The cost of a schedule is the sum of lost values and invested energy. In order to finish a job the scheduler has to determine which processors to use and set their speeds accordingly. A processor's energy consumption is power $\Power{s}$ integrated over time, where $\Power{s}=s^{\alpha}$ is the power consumption when running at speed $s$. Since we consider the online variant of the problem, the scheduler has no knowledge about future jobs. This problem was introduced by~\citet{Chan:2010} for the case of a single processor. They presented an online algorithm which is $\alpha^{\alpha}+2e\alpha$-competitive. We provide an online algorithm for the case of multiple processors with an improved competitive ratio of $\alpha^{\alpha}$.}},
  author       = {{Kling, Peter and Pietrzyk, Peter}},
  booktitle    = {{Proceedings of the 25th ACM Symposium on Parallelism in Algorithms and Architectures (SPAA)}},
  pages        = {{251--260}},
  title        = {{{Profitable Scheduling on Multiple Speed-Scalable Processors}}},
  doi          = {{10.1145/2486159.2486183}},
  year         = {{2013}},
}

@article{7271,
  author       = {{de Lemos, Rogério and Giese, Holger and Müller, Hausi A. and Shaw, Mary and Andersson, Jesper and Litoiu, Marin and Schmerl, Bradley and Tamura, Gabriel and Villegas, Norha M. and Vogel, Thomas and Weyns, Danny and Baresi, Luciano and Becker, Basil and Bencomo, Nelly and Brun, Yuriy and Cukic, Bojan and Desmarais, Ron and Dustdar, Schahram and Engels, Gregor and Geihs, Kurt and Göschka, Karl M. and Gorla, Alessandra and Grassi, Vincenzo and Inverardi, Paola and Karsai, Gabor and Kramer, Jeff and Lopes, Antonia and Magee, Jeff and Malek, Sam and Mankovskii, Serge and Mirandola, Raffaela and Mylopoulos, John and Nierstrasz, Oscar and Pezzè, Mauro and Prehofer, Christian and Schäfer, Wilhelm and Schlichting, Rick and Smith, Dennis B. and Sousa, João Pedro and Tahvildari, Ladan and Wong, Kenny and Wuttke, Jochen}},
  journal      = {{Software Engineering for Self-Adaptive Systems II}},
  pages        = {{1--32}},
  title        = {{{Software Engineering for Self-Adaptive Systems: A Second Research Roadmap}}},
  doi          = {{10.1007/978-3-642-35813-5_1}},
  year         = {{2013}},
}

@article{7272,
  author       = {{Bouillon, Elke and Güldali, Baris and Herrmann, Andrea and Keuler, Thorsten and Moldt, Daniel and Riebisch, Matthias}},
  journal      = {{Softwaretechnik-Trends}},
  number       = {{1}},
  pages        = {{29--30}},
  title        = {{{Leichtgewichtige Traceability im agilen Entwicklungsprozess am Beispiel von Scrum}}},
  volume       = {{33}},
  year         = {{2013}},
}

@article{7273,
  author       = {{Grieger, Marvin and Güldali, Baris and Sauer, Stefan and Mlynarski, Michael}},
  journal      = {{OBJEKTspektrum (Online Themenspecials)}},
  pages        = {{1--4}},
  title        = {{{Testen bei Migrationsprojekten}}},
  year         = {{2013}},
}

@article{7274,
  author       = {{Fazal-Baqaie, Masud and Güldali, Baris and Luckey, Markus and Sauer, Stefan and Spijkerman, Michael}},
  journal      = {{OBJEKTspektrum (Online Themenspecials)}},
  number       = {{RE/2013}},
  pages        = {{1--5}},
  title        = {{{Maßgeschneidert und werkzeugunterstützt Entwickeln angepasster Requirements Engineering-Methoden}}},
  year         = {{2013}},
}

@article{7275,
  author       = {{Engels, Gregor and Luckey, Markus}},
  journal      = {{Computer Science - Research and Development}},
  number       = {{1}},
  pages        = {{1--2}},
  title        = {{{Editorial}}},
  volume       = {{28}},
  year         = {{2013}},
}

@article{7276,
  author       = {{Engels, Gregor and Gerth, Christian and Kleinjohann, Bernd and Kleinjohann, Lisa and Müller, Wolfgang and Sauer, Stefan}},
  journal      = {{Forschungsforum Paderborn}},
  pages        = {{54--61}},
  title        = {{{Informationstechnik spart Ressourcen}}},
  volume       = {{16/2013}},
  year         = {{2013}},
}

@article{7277,
  author       = {{Gerth, Christian and Küster, Jochen and Luckey, Markus and Engels, Gregor}},
  journal      = {{Software and Systems Modeling}},
  number       = {{3}},
  pages        = {{517--535}},
  title        = {{{Detection and Resolution of Conflicting Change Operations in Version Management of Process Models}}},
  volume       = {{12}},
  year         = {{2013}},
}

@article{7278,
  abstract     = {{Modellbasiertes Testen verspricht potenziell eine höhere Effizienz und Effektivität im Testprozess. Ob im eigenen Kontext der Einsatz wirtschaftlich ist, ist jedoch häufig unklar. Dieser Beitrag analysiert systematisch Kosten- und Nutzenfaktoren und stellt ein Verfahren zur Abschätzung der Wirtschaftlichkeit des modellbasierten Testens vor. Anhand eines Beispiels wird der Ablauf veranschaulicht.}},
  author       = {{Faragó, David and Törsel, Arne-Michael and Mlynarski, Michael and Weißleder, Stephan and Güldali, Baris and Brandes, Christian}},
  journal      = {{OBJEKTspektrum}},
  pages        = {{32--38}},
  title        = {{{Wirtschaftlichkeitsberechnung für MBT: Wann sich modellbasiertes Testen lohnt}}},
  volume       = {{4}},
  year         = {{2013}},
}

@inbook{7523,
  author       = {{Engels, Gregor and Sauer, Stefan}},
  booktitle    = {{Handbook of Software Engineering and Knowledge Engineering}},
  editor       = {{Chang, S K}},
  isbn         = {{9789810245146}},
  pages        = {{21--53}},
  publisher    = {{World Scientific Publishing Company}},
  title        = {{{Object-oriented Modeling of Multimedia Applications}}},
  doi          = {{10.1142/4603}},
  volume       = {{2}},
  year         = {{2013}},
}

@phdthesis{7569,
  abstract     = {{Dynamic Meta Modeling (DMM) is a semantics specification technique targeted at MOF-based modeling languages, where a language's behavior is defined by means of graphical operational rules which change runtime models. The DMM approach has first been suggested by Engels et al. in 2000; Hausmann has then defined the DMM language on a conceptual level within his PhD thesis in 2006. Consequently, the next step was to bring the existing DMM concepts alive, and then to apply them to different modeling languages, making use of the lessons learned to improve the DMM concepts as well as the DMM tooling. The result of this process is the DMM++ method, which is presented within this thesis. Our contributions are three-fold: First, and according to our experiences with the DMM language, we have introduced new concepts such as refinement by means of rule overriding, and we have strengthened existing concepts such as the dealing with universally quantified structures or attributes. Second, we have developed a test-driven process for semantics specification: A set of test models is created, and their expected behavior is fixed. Then, the DMM rules are created incrementally, finally resulting in a DMM ruleset realizing at least the expected behavior of the test models. Additionally, we have defined a set of coverage criteria for DMM rulesets which allow to measure the quality of a set of test models. Third, we have shown how functional as well as non-functional requirements can be formulated against models and their DMM specifications. The former is achieved by providing a visual language for formulating temporal logic properties, which are then verified with model checking techniques, and by allowing for visual debugging of models failing a requirement. For the latter, the modeler can add performance information to models and analyze their performance properties, e.g. average throughput.}},
  author       = {{Soltenborn, Christian}},
  publisher    = {{Universität Paderborn}},
  title        = {{{Quality Assurance with Dynamic Meta Modeling}}},
  url          = {{http://nbn-resolving.de/urn:nbn:de:hbz:466:2-12420}},
  year         = {{2013}},
}

