@misc{185,
  author       = {Schmidt, Christopher},
  publisher    = {Universität Paderborn},
  title        = {{Kundenbewertungen im Online-Handel -- Alles Betrug?}},
  year         = {2016},
}

@inproceedings{186,
  abstract     = {Software verification is an established method to ensure software safety. Nevertheless, verification still often fails, either because it consumes too much resources, e.g., time or memory, or the technique is not mature enough to verify the property. Often then discarding the partial verification, the validation process proceeds with techniques like testing. To enable standard testing to profit from previous, partial verification, we use a summary of the verification effort to simplify the program for subsequent testing. Our techniques use this summary to construct a residual program which only contains program paths with unproven assertions. Afterwards, the residual program can be used with standard testing tools. Our first experiments show that testing profits from the partial verification. The test effort is reduced and combined verification and testing is faster than a complete verification.},
  author       = {Czech, Mike and Jakobs, Marie-Christine and Wehrheim, Heike},
  booktitle    = {Software Engineering 2016},
  editor       = {Knoop, Jens and Zdun, Uwe},
  pages        = {17--18},
  title        = {Just test what you cannot verify!},
  year         = {2016},
}

@techreport{221,
  author       = {Platenius, Marie Christin and Josifovska, Klementina and van Rooijen, Lorijn and Arifulina, Svetlana and Becker, Matthias and Engels, Gregor and Schäfer, Wilhelm},
  institution  = {Universität Paderborn},
  title        = {An Overview of Service Specification Language and Matching in {On-The-Fly} Computing (v0.3)},
  year         = {2016},
}

@article{222,
  abstract     = {Virtual field programmable gate arrays (FPGA) are overlay architectures realized on top of physical FPGAs. They are proposed to enhance or abstract away from the physical FPGA for experimenting with novel architectures and design tool flows. In this paper, we present an embedding of a ZUMA-based virtual FPGA fabric into a complete configurable system-on-chip. Such an embedding is required to fully harness the potential of virtual FPGAs, in particular to give the virtual circuits access to main memory and operating system services, and to enable a concurrent operation of virtualized and non-virtualized circuitry. We discuss our extension to ZUMA and its embedding into the ReconOS operating system for hardware/software systems. Furthermore, we present an open source tool flow to synthesize configurations for the virtual FPGA, along with an analysis of the area and delay overheads involved.},
  author       = {Wiersema, Tobias and Bockhorn, Arne and Platzner, Marco},
  journal      = {Computers \& Electrical Engineering},
  pages        = {112--122},
  publisher    = {Elsevier},
  title        = {An Architecture and Design Tool Flow for Embedding a Virtual {FPGA} into a Reconfigurable {System-on-Chip}},
  doi          = {10.1016/j.compeleceng.2016.04.005},
  year         = {2016},
}

@misc{223,
  abstract     = {We consider the problem of aggregation in overlay networks. We use a synchronous time model in which each node has polylogarithmic memory and can send at most a polylogarithmic number of messages per round. We investigate how to quickly compute the result of an aggregate function $f$ over elements that are distributed among the nodes of the network such that the result is eventually known by a selected root node. We show how to compute distributive aggregate functions such as SUM, MAX, and OR in time $O(\log n / \log\log n)$ using a tree that is created in a pre-processing phase. If only a polylogarithmic number of data items need to be aggregated, we show how to compute the result in time $O(\sqrt{\log n / \log\log n})$. Furthermore, we show how to compute holistic aggregate functions such as DISTINCT, SMALLEST(k) and MODE(k) in time $O(\log n / \log\log n)$. Finally, we show a lower bound of $\Omega(\sqrt{\log n / \log\log n})$ for deterministic algorithms that compute any of the aggregate functions in the scope of the thesis.},
  author       = {Hinnenthal, Kristian},
  publisher    = {Universität Paderborn},
  title        = {Aggregation in Overlay Networks},
  year         = {2016},
}

@inproceedings{224,
  abstract     = {In modern software development, paradigms like component-based software engineering (CBSE) and service-oriented architectures (SOA) emphasize the construction of large software systems out of existing components or services. Therein, a service is a self-contained piece of software, which adheres to a specified interface. In a model-based software design, this interface constitutes our sole knowledge of the service at design time, while service implementations are not available. Therefore, correctness checks or detection of potential errors in service compositions has to be carried out without the possibility of executing services. This challenges the usage of standard software error localization techniques for service compositions. In this paper, we review state-of-the-art approaches for error localization of software and discuss their applicability to service compositions.},
  author       = {Krämer, Julia and Wehrheim, Heike},
  booktitle    = {Proceedings of the 5th European Conference on Service-Oriented and Cloud Computing ({ESOCC} 2016)},
  pages        = {248--262},
  title        = {A short survey on using software error localization for service compositions},
  doi          = {10.1007/978-3-319-44482-6_16},
  year         = {2016},
}

@inproceedings{225,
  abstract     = {Image Processing is fundamental for any camera-based vision system. In order to automate the prototyping process of image processing solutions to some extend, we propose a holistic, adaptive approach that comprises concepts for specification, composition, recommendation, execution, and rating of image processing functionality. The fundamental idea is to realize image processing applications according to Service-oriented Computing design principles. That is, distinct image processing functionality is encapsulated in terms of stateless services. Services are then used as building blocks for more complex image processing functionality. To automatically compose complex image processing functionality, our proposed approach incorporates a flexible, Artificial Intelligence planning-based forward search algorithm. Decision-making between alternative composition steps is supported by a learning recommendation system, which keeps track of valid composition steps by automatically constructing a composition grammar. In addition, it adapts to solutions of high quality by means of feedback-based Reinforcement Learning techniques. A concrete use case serves as proof of concept and demonstrates the feasibility of our holistic, adaptive approach.},
  author       = {Jungmann, Alexander and Kleinjohann, Bernd},
  booktitle    = {Proceedings of the 21st {IEEE} International Conference on Emerging Technologies and Factory Automation ({ETFA})},
  pages        = {1--8},
  title        = {A Holistic and Adaptive Approach for Automated Prototyping of Image Processing Functionality},
  doi          = {10.1109/ETFA.2016.7733522},
  year         = {2016},
}

@inproceedings{226,
  abstract     = {Error detection, localization and correction are time-intensive tasks in software development, but crucial to deliver functionally correct products. Thus, automated approaches to these tasks have been intensively studied for standard software systems. For model-based software systems, the situation is different. While error detection is still well-studied, error localization and correction is a less-studied domain. In this paper, we examine error localization and correction for models of service compositions. Based on formal definitions of error and correction in this context, we show that the classical approach of error localization and correction, i.e. first determining a set of suspicious statements and then proposing changes to these statements, is ineffective in our context. In fact, it lessens the chance to succeed in finding a correction at all. In this paper, we introduce correction proposal as a novel approach on error correction in service compositions integrating error localization and correction in one combined step. In addition, we provide an algorithm to compute such correction proposals automatically.},
  author       = {Krämer, Julia and Wehrheim, Heike},
  booktitle    = {Proceedings of the 1st International Workshop on Formal to Practical Software Verification and Composition ({VeryComp} 2016)},
  pages        = {445--457},
  title        = {A Formal Approach to Error Localization and Correction in Service Compositions},
  doi          = {10.1007/978-3-319-50230-4_35},
  year         = {2016},
}

@inproceedings{227,
  abstract     = {Information flow analysis studies the flow of data between program entities (e.g. variables), where the allowed flow is specified via security policies. Typical information flow analyses compute a conservative (over-)approximation of the flows in a program. Such an analysis may thus signal non-existing violations of the security policy. In this paper, we propose a new technique for inspecting the reported violations (counterexamples) for spuriousity. Similar to counterexample-guided-abstraction-refinement (CEGAR) in software verification, we use the result of this inspection to improve the next round of the analysis. We prove soundness of this scheme.},
  author       = {Töws, Manuel and Wehrheim, Heike},
  booktitle    = {Proceedings of the 18th International Conference on Formal Engineering Methods ({ICFEM} 2016)},
  pages        = {466--483},
  title        = {A {CEGAR} Scheme for Information Flow Analysis},
  doi          = {10.1007/978-3-319-47846-3_29},
  year         = {2016},
}

@misc{2271,
  author       = {Hesso, Aref},
  publisher    = {Universität Paderborn},
  title        = {{Die Rolle der Reputation: Die Entstehung, der Aufbau, die Messung und die Auswirkung der Reputation auf Kunden und Unternehmen}},
  year         = {2016},
}

@inproceedings{217,
  abstract     = {Today, cloud vendors host third party black-box services, whose developers usually provide only textual descriptions or purely syntactical interface specifications. Cloud vendors that give substantial support to other third party developers to integrate hosted services into new software solutions would have a unique selling feature over their competitors. However, to reliably determine if a service is reusable, comprehensive service specifications are needed. Characteristic for comprehensive in contrast to syntactical specifications are the formalization of ontological and behavioral semantics, homogeneity according to a global ontology, and a service grounding that links the abstract service description and its technical realization. Homogeneous, semantical specifications enable to reliably identify reusable services, whereas the service grounding is needed for the technical service integration. In general, comprehensive specifications are not available and have to be derived. Existing automatized approaches are restricted to certain characteristics of comprehensiveness. In my PhD, I consider an automatized approach to derive fully-fledged comprehensive specifications for black-box services. Ontological semantics are derived from syntactical interface specifications. Behavioral semantics are mined from call logs that cloud vendors create to monitor the hosted services. The specifications are harmonized over a global ontology. The service grounding is established using traceability information. The approach enables third party developers to compose services into complex systems and creates new sales channels for cloud and service providers.},
  author       = {Schwichtenberg, Simon},
  booktitle    = {Proceedings of the 38th International Conference on Software Engineering Companion ({ICSE})},
  pages        = {815--818},
  title        = {Automatized Derivation of Comprehensive Specifications for Black-box Services},
  doi          = {10.1145/2889160.2889271},
  year         = {2016},
}

@inproceedings{218,
  abstract     = {In the Image Processing domain, automated generation of complex Image Processing functionality is highly desirable; e.g., for rapid prototyping. Service composition techniques, in turn, facilitate automated generation of complex functionality based on building blocks in terms of services. For that reason, we aim for transferring the Service Composition paradigm into the Image Processing domain. In this paper, we present our symbolic composition approach that enables us to automatically generate Image Processing applications. Functionality of Image Processing services is described by means of a variant of first-order logic, which grounds on domain knowledge operationalized in terms of ontologies. A Petri-net formalism serves as basis for modeling data-flow of services and composed services. A planning-based composition algorithm automatically composes complex data-flow for a required functionality. A brief evaluation serves as proof of concept.},
  author       = {Jungmann, Alexander and Kleinjohann, Bernd},
  booktitle    = {Proceedings of the 13th {IEEE} International Conference on Services Computing ({SCC})},
  pages        = {106--113},
  title        = {Automatic Composition of Service-based Image Processing Applications},
  doi          = {10.1109/SCC.2016.21},
  year         = {2016},
}

@inproceedings{219,
  abstract     = {Existing software markets like Google Play allow users to search among available Apps and select one based on the description provided for the App or based on its rating. Future software markets facilitate on-the-fly composition of such Apps based on users' individual wishes. Realizing such On-The-Fly Computing (OTF) markets requires support of sophisticated software features. In addition, suitable orchestration among such features needs to ensure well-alignment of business and IT aspects in case of run-time changes like market dynamics. However, all these introduce new architectural and management complexities, which are specific to such markets. An architecture framework for OTF markets will include design solutions to overcome these complexities. In my PhD, I aim at identifying an architecture framework for OTF markets including main architectural building blocks and a systematic development process. Such an architecture framework enables the development of OTF markets in the future. Furthermore, this knowledge can be used as a basis to improve existing software markets by integrating missing functionalities.},
  author       = {Jazayeri, Bahar},
  booktitle    = {Proceedings of the 10th European Conference on Software Architecture ({ECSA} Workshops)},
  pages        = {42},
  publisher    = {ACM},
  title        = {Architectural Management of {On-The-Fly} Computing Markets},
  doi          = {10.1145/2993412.3010821},
  year         = {2016},
}

@phdthesis{220,
  author       = {Keller, Matthias},
  school       = {Universität Paderborn},
  title        = {Application Deployment at Distributed Clouds},
  year         = {2016},
}

@article{5762,
  abstract     = {This paper introduces the problem of communication pattern adaption for a distributed self-adjusting binary search tree. We propose a simple local algorithm that is closely related to the over thirty-year-old idea of splay trees and evaluate its adaption performance in the distributed scenario if different communication patterns are provided. To do so, the process of self-adjustment is modeled similarly to a basic network creation game in which the nodes want to communicate with only a certain subset of all nodes. We show that, in general, the game (i.e., the process of local adjustments) does not converge, and that convergence is related to certain structures of the communication interests, which we call conflicts. We classify conflicts and show that for two communication scenarios in which convergence is guaranteed, the self-adjusting tree performs well. Furthermore, we investigate the different classes of conflicts separately and show that, for a certain class of conflicts, the performance of the tree network is asymptotically as good as the performance for converging instances. However, for the other conflict classes, a distributed self-adjusting binary search tree adapts poorly.},
  author       = {Strothmann, Thim Frederik},
  issn         = {1526-1719},
  journal      = {Journal of Graph Algorithms and Applications},
  number       = {1},
  pages        = {79--100},
  publisher    = {Journal of Graph Algorithms and Applications},
  title        = {The Impact of Communication Patterns on Distributed Self-Adjusting Binary Search Tree},
  doi          = {10.7155/jgaa.00385},
  volume       = {20},
  year         = {2016},
}

@article{726,
  author       = {Wette, Philip and Karl, Holger},
  journal      = {Computer Communications},
  pages        = {45--58},
  title        = {{DCT$^2$Gen}: A traffic generator for data centers},
  doi          = {10.1016/j.comcom.2015.12.001},
  year         = {2016},
}

@inproceedings{730,
  abstract     = {Allocating resources to virtualized network functions and services to meet service level agreements is a challenging task for NFV management and orchestration systems. This becomes even more challenging when agile development methodologies, like DevOps, are applied. In such scenarios, management and orchestration systems are continuously facing new versions of functions and services which makes it hard to decide how much resources have to be allocated to them to provide the expected service performance. One solution for this problem is to support resource allocation decisions with performance behavior information obtained by profiling techniques applied to such network functions and services. In this position paper, we analyze and discuss the components needed to generate such performance behavior information within the NFV DevOps workflow. We also outline research questions that identify open issues and missing pieces for a fully integrated NFV profiling solution. Further, we introduce a novel profiling mechanism that is able to profile virtualized network functions and entire network service chains under different resource constraints before they are deployed on production infrastructure.},
  author       = {Peuster, Manuel and Karl, Holger},
  booktitle    = {Fifth European Workshop on Software-Defined Networks, {EWSDN} 2016, Den Haag, The Netherlands, October 10-11, 2016},
  location     = {Den Haag},
  pages        = {7--12},
  title        = {Understand Your Chains: Towards Performance Profile-Based Network Service Management},
  doi          = {10.1109/EWSDN.2016.9},
  year         = {2016},
}

@inproceedings{731,
  abstract     = {Traditional cellular networks are forced to remain active regardless of the actual amount of traffic that is currently produced/requested, with a clear waste of energy. Two-layer mobile networks with separated signalling and data layers have been recently proposed for energy savings in future implementations. These networks are able to switch off unneeded data cells completely while maintaining full coverage with their signalling cells, thus saving energy. In this demonstration, we showcase a testbed that uses Wi-Fi access points to emulate small cells of the data layer and a publicly available cellular connection as the signalling layer. We use off-the-shelf Android smartphones with an ad-hoc networking management module and a MultiPath TCP-enabled kernel to manage the Wi-Fi and cellular interfaces simultaneously. The testbed is used to demonstrate the general feasibility of this layered architecture and to facilitate experiments with network-wide resource optimization.},
  author       = {Peuster, Manuel and Karl, Holger and Redondi, Alessandro Enrico and Capone, Antonio},
  booktitle    = {{IEEE} Conference on Computer Communications Workshops, {INFOCOM} Workshops 2016, San Francisco, CA, USA, April 10-14, 2016},
  location     = {San Francisco},
  pages        = {1015--1016},
  title        = {Demonstrating on-demand cell switching with a two-layer mobile network testbed},
  doi          = {10.1109/INFCOMW.2016.7562232},
  year         = {2016},
}

@inproceedings{738,
  abstract     = {Virtualized network services consisting of multiple individual network functions are already today deployed across multiple sites, so called multi-PoP (points of presence) environments. This allows to improve service performance by optimizing its placement in the network. But prototyping and testing of these complex distributed software systems becomes extremely challenging. The reason is that not only the network service as such has to be tested but also its integration with management and orchestration systems. Existing solutions, like simulators, basic network emulators, or local cloud testbeds, do not support all aspects of these tasks. To this end, we introduce MeDICINE, a novel NFV prototyping platform that is able to execute production-ready network functions, provided as software containers, in an emulated multi-PoP environment. These network functions can be controlled by any third-party management and orchestration system that connects to our platform through standard interfaces. Based on this, a developer can use our platform to prototype and test complex network services in a realistic environment running on his laptop.},
  author       = {Peuster, Manuel and Karl, Holger and van Rossem, Steven},
  booktitle    = {{IEEE} Conference on Network Function Virtualization and Software Defined Networks ({NFV-SDN})},
  location     = {Palo Alto},
  title        = {{MeDICINE}: Rapid Prototyping of Production-Ready Network Services in {Multi-PoP} Environments},
  doi          = {10.1109/NFV-SDN.2016.7919490},
  year         = {2016},
}

@inproceedings{4374,
  author       = {Zimmermann, Steffen and Herrmann, Philipp and Kundisch, Dennis and Nault, Barrie},
  booktitle    = {Workshop on {IS} Design and Economic Behavior ({ISDEB})},
  location     = {Ilmenau, Germany},
  title        = {How do different Sources of the Variance of Online Consumer Ratings matter?},
  year         = {2016},
}

