@article{706,
  author       = {Mäcker, Alexander and Malatyali, Manuel and Meyer auf der Heide, Friedhelm and Riechers, Sören},
  journal      = {Journal of Combinatorial Optimization},
  number       = {4},
  pages        = {1168--1194},
  publisher    = {Springer},
  title        = {Cost-efficient Scheduling on Machines from the Cloud},
  doi          = {10.1007/s10878-017-0198-x},
  volume       = {36},
  year         = {2017},
}

@phdthesis{707,
  author       = {Walther, Sven},
  school       = {Universität Paderborn},
  title        = {Knowledge-based Verification of Service Compositions},
  doi          = {10.17619/UNIPB/1-307},
  year         = {2017},
}

@inproceedings{71,
  abstract     = {Today, software verification tools have reached the maturity to be used for large scale programs. Different tools perform differently well on varying code. A software developer is hence faced with the problem of choosing a tool appropriate for her program at hand. A ranking of tools on programs could facilitate the choice. Such rankings can, however, so far only be obtained by running all considered tools on the program. In this paper, we present a machine learning approach to predicting rankings of tools on programs. The method builds upon so-called label ranking algorithms, which we complement with appropriate kernels providing a similarity measure for programs. Our kernels employ a graph representation for software source code that mixes elements of control flow and program dependence graphs with abstract syntax trees. Using data sets from the software verification competition SV-COMP, we demonstrate our rank prediction technique to generalize well and achieve a rather high predictive accuracy (rank correlation > 0.6).},
  author       = {Czech, Mike and Hüllermeier, Eyke and Jakobs, Marie-Christine and Wehrheim, Heike},
  booktitle    = {Proceedings of the 3rd International Workshop on Software Analytics},
  pages        = {23--26},
  title        = {Predicting Rankings of Software Verification Tools},
  doi          = {10.1145/3121257.3121262},
  year         = {2017},
}

@inproceedings{717,
  abstract     = {In conventional large-scale networks, creation and management of network services are costly and complex tasks that often consume a lot of resources, including time and manpower. Network softwarization and network function virtualization have been introduced to tackle these problems, aiming at decreasing costs and complexity of implementing new services, maintaining the implemented services, and managing available resources in service provisioning platforms and underlying infrastructures. To experience the full potential of these approaches, innovative development support tools and service provisioning environments are needed. To answer these needs, we introduce the architecture of the open-source SONATA system, a service programming, orchestration, and management framework. We present a development toolchain for virtualized network services, fully integrated with a service platform and orchestration system. We introduce the modular and flexible architecture of our system and discuss its main components and features, such as function- and service-specific managers that allow fine-grained service management, slicing support to facilitate multi-tenancy, recursiveness for improved scalability, and full-featured DevOps support.},
  author       = {Dräxler, Sevil and Karl, Holger and Peuster, Manuel and Razzaghi Kouchaksaraei, Hadi and Bredel, Michael and Lessmann, Johannes and Soenen, Thomas and Tavernier, Wouter and Mendel-Brin, Sharon and Xilouris, George},
  booktitle    = {2017 IEEE International Conference on Communications Workshops (ICC Workshops)},
  isbn         = {9781509015252},
  location     = {Paris, France},
  publisher    = {IEEE},
  title        = {{SONATA}: Service programming and orchestration for virtualized software networks},
  doi          = {10.1109/iccw.2017.7962785},
  year         = {2017},
}

@techreport{72,
  abstract     = {Software verification competitions, such as the annual SV-COMP, evaluate software verification tools with respect to their effectivity and efficiency. Typically, the outcome of a competition is a (possibly category-specific) ranking of the tools. For many applications, such as building portfolio solvers, it would be desirable to have an idea of the (relative) performance of verification tools on a given verification task beforehand, i.e., prior to actually running all tools on the task. In this paper, we present a machine learning approach to predicting rankings of tools on verification tasks. The method builds upon so-called label ranking algorithms, which we complement with appropriate kernels providing a similarity measure for verification tasks. Our kernels employ a graph representation for software source code that mixes elements of control flow and program dependence graphs with abstract syntax trees. Using data sets from SV-COMP, we demonstrate our rank prediction technique to generalize well and achieve a rather high predictive accuracy. In particular, our method outperforms a recently proposed feature-based approach of Demyanova et al. (when applied to rank predictions).},
  author       = {Czech, Mike and Hüllermeier, Eyke and Jakobs, Marie-Christine and Wehrheim, Heike},
  internal-note = {TODO(review): @techreport requires an institution field; add the issuing institution (not inventing it here)},
  title        = {Predicting Rankings of Software Verification Competitions},
  year         = {2017},
}

@inproceedings{73,
  abstract     = {Today, verification tools do not only output yes or no, but also provide correctness arguments or counterexamples. While counterexamples help to fix bugs, correctness arguments are used to increase the trust in program correctness, e.g., in Proof-Carrying Code (PCC). Correctness arguments are well-studied for single analyses, but not when a set of analyses together verifies a program, each of the analyses checking only a particular part. Such a set of partial, complementary analyses is often used when a single analysis would fail or is inefficient on some program parts. We propose PART\_PW, a technique which allows us to automatically construct a proof witness (correctness argument) from the analysis results obtained by a set of partial, complementary analyses. The constructed proof witnesses are proven to be valid correctness arguments and in our experiments we use them seamlessly and efficiently in existing PCC approaches.},
  author       = {Jakobs, Marie-Christine},
  booktitle    = {Software Engineering and Formal Methods},
  editor       = {Cimatti, Alessandro and Sirjani, Marjan},
  pages        = {120--135},
  title        = {{PART\_PW}: From Partial Analysis Results to a Proof Witness},
  doi          = {10.1007/978-3-319-66197-1_8},
  year         = {2017},
}

@misc{74,
  author       = {Knorr, Christoph},
  publisher    = {Universität Paderborn},
  title        = {{OpenCL-basierte Videoverarbeitung auf heterogenen Rechenknoten}},
  year         = {2017},
}

@inproceedings{84,
  abstract     = {The increasing popularity of paradigms like service-oriented computing and cloud computing is leading to a growing amount of service providers offering software components in the form of deployed, ready-to-use services (Software as a Service, SaaS) [14, 20]. In order to discover and select software services, intermediaries apply service matching approaches for determining whether the specification of a provided service satisfies the requester’s requirements. There are already lots of different service matching approaches considering different service properties (structural, behavioral, and non-functional properties). However, each of these approaches alone is not enough to provide a high matching result quality (e.g., accurate matching results) [BOR04]. Thus, such approaches should be combined into a more holistic approach leading to more accurate matching results. However, this combination is a manual, error-prone procedure where many design decisions are made. Furthermore, this procedure has to be repeated frequently depending on the context, e.g., to consider different requesters or markets.},
  author       = {Platenius, Marie Christin and Arifulina, Svetlana and Schäfer, Wilhelm},
  booktitle    = {Tagungsband Software Engineering},
  pages        = {81--82},
  title        = {{MatchBox}: A Framework for Dynamic Configuration of Service Matching Processes (Extended Abstract)},
  year         = {2017},
}

@misc{85,
  author       = {Lohrsträter, Lukas},
  publisher    = {Universität Paderborn},
  title        = {{Literaturüberblick über die Rolle von Business Architecture in Enterprise Architecture}},
  year         = {2017},
}

@misc{86,
  author       = {Niggemeyer, Laura},
  publisher    = {Universität Paderborn},
  title        = {{Kartellabsprachen und vertikale Preisbindungen - Eine wettbewerbspolitische Analyse am Bespiel der Lebensmittelindustrie in Deutschland}},
  year         = {2017},
}

@inproceedings{87,
  abstract     = {Management of complex network services requires flexible and efficient service provisioning as well as optimized handling of continuous changes in the workload of the service. To adapt to changes in the demand, service components need to be replicated (scaling) and allocated to physical resources (placement) dynamically. In this paper, we propose a fully automated approach to the joint optimization problem of scaling and placement, enabling quick reaction to changes. We formalize the problem, analyze its complexity, and develop two algorithms to solve it. Extensive empirical results show the applicability and effectiveness of the proposed approach.},
  author       = {Dräxler, Sevil and Karl, Holger and Mann, Zoltan Adam},
  booktitle    = {Proceedings of the 17th IEEE/ACM International Symposium on Cluster, Cloud and Grid Computing (CCGrid 2017)},
  title        = {Joint Optimization of Scaling and Placement of Virtual Network Services},
  doi          = {10.1109/CCGRID.2017.25},
  year         = {2017},
}

@misc{88,
  author       = {Ganesh Athreya, Advait},
  publisher    = {Universität Paderborn},
  title        = {Instantiating a Predicate Encryption Scheme via Pair Encodings},
  year         = {2017},
}

@phdthesis{89,
  abstract     = {The vision of OTF Computing is to have the software needs of end users in the future covered by an automatic composition of existing software services. Here we focus on natural language software requirements that end users formulate and submit to OTF providers as requirement specifications. These requirements serve as the sole foundation for the composition of software; but they can be inaccurate and incomplete. Up to now, software developers have identified and corrected these deficits by using a bidirectional consolidation process. However, this type of quality assurance is no longer included in OTF Computing - the classic consolidation process is dropped. This is where this work picks up, dealing with the inaccuracies of freely formulated software design requirements. To do this, we developed the CORDULA (Compensation of Requirements Descriptions Using Linguistic Analysis) system that recognizes and compensates for language deficiencies (e.g., ambiguity, vagueness and incompleteness) in requirements written by inexperienced end users. CORDULA supports the search for suitable software services that can be combined in a composition by transferring requirement specifications into canonical core functionalities. This dissertation provides the first-ever method for holistically recording and improving language deficiencies in user-generated requirement specifications by dealing with ambiguity, incompleteness and vagueness in parallel and in sequence.},
  author       = {Bäumer, Frederik Simon},
  school       = {Universität Paderborn},
  title        = {{Indikatorbasierte Erkennung und Kompensation von ungenauen und unvollständig beschriebenen Softwareanforderungen}},
  doi          = {10.17619/UNIPB/1-157},
  year         = {2017},
}

@misc{96,
  author       = {Warkentin, Markus},
  publisher    = {Universität Paderborn},
  title        = {{Geschäftsmodell-Frameworks zur Analyse und Klassifikation bestehender Geschäftsmodelle}},
  year         = {2017},
}

@inproceedings{97,
  abstract     = {Bridging the gap between informal, imprecise, and vague user requirements descriptions and precise formalized specifications is the main task of requirements engineering. Techniques such as interviews or story telling are used when requirements engineers try to identify a user's needs. The requirements specification process is typically done in a dialogue between users, domain experts, and requirements engineers. In our research, we aim at automating the specification of requirements. The idea is to distinguish between untrained users and trained users, and to exploit domain knowledge learned from previous runs of our system. We let untrained users provide unstructured natural language descriptions, while we allow trained users to provide examples of behavioral descriptions. In both cases, our goal is to synthesize formal requirements models similar to statecharts. From requirements specification processes with trained users, behavioral ontologies are learned which are later used to support the requirements specification process for untrained users. Our research method is original in combining natural language processing and search-based techniques for the synthesis of requirements specifications. Our work is embedded in a larger project that aims at automating the whole software development and deployment process in envisioned future software service markets.},
  author       = {van Rooijen, Lorijn and Bäumer, Frederik Simon and Platenius, Marie Christin and Geierhos, Michaela and Hamann, Heiko and Engels, Gregor},
  booktitle    = {2017 IEEE 25th International Requirements Engineering Conference Workshops (REW)},
  isbn         = {978-1-5386-3489-9},
  keywords     = {Software, Unified modeling language, Requirements engineering, Ontologies, Search problems, Natural languages},
  location     = {Lisbon, Portugal},
  pages        = {379--385},
  publisher    = {IEEE},
  title        = {From User Demand to Software Service: Using Machine Learning to Automate the Requirements Specification Process},
  doi          = {10.1109/REW.2017.26},
  year         = {2017},
}

@inproceedings{98,
  abstract     = {Today, modern IT-systems are often an interplay of third-party web services. Developers in their role as requesters integrate existing services of different providers into new IT-systems. Providers use frameworks like Open API to create syntactic service specifications from which requesters generate code to integrate services. Proper service discovery is crucial to identify usable services in the growing plethora of third-party services. Most advanced service discovery approaches rely on semantic specifications, e.g., OWL-S. While semantic specification is crucial for a precise discovery, syntactical specification is needed for service invocation. To close the gap between semantic and syntactic specifications, service grounding establishes links between the semantic and syntactic specifications. However, for a large number of web services still no semantic specification or grounding exists. In this paper, we present an approach that semi-automates the semantic specification of web services for service providers and additionally helps service requesters to leverage semantic web services. Our approach enables a higher degree of automation than other approaches. This includes the creation of semantic specifications and service groundings for service providers as well as the integration of services for requesters by using our code generator. As proof-of-concept, we provide a case study, where we derive a sophisticated semantic OWL-S specification from a syntactic Open API specification.},
  author       = {Schwichtenberg, Simon and Gerth, Christian and Engels, Gregor},
  booktitle    = {Proceedings of the 24th IEEE International Conference on Web Services (ICWS)},
  pages        = {484--491},
  title        = {From {Open API} to Semantic Specifications and Code Adapters},
  year         = {2017},
}

@inproceedings{981,
  abstract     = {Benchmarking and profiling virtual network functions (VNFs) generates input knowledge for resource management decisions taken by management and orchestration systems. Such VNFs are usually not executed in isolation but are often deployed as part of a service function chain (SFC) that connects single functions into complex structures. To manage such chains, isolated performance profiles of single functions have to be combined to get insights into the overall behavior of an SFC. This becomes particularly challenging in highly agile DevOps environments in which profiling processes need to be fully automated and detailed insights about a chain's internal structures are not always available. In this paper, we introduce a fully automatable, flexible, and platform-agnostic profiling system that allows to profile entire SFCs at once. This obviates manual modeling procedures to combine profiling results from single VNFs to reflect SFC performance. We use a case study with different SFC configurations to show that it is hard to model the resulting SFC performance based on single-VNF measurements and that performance interactions between real, non-trivial functions that are deployed in a chain exist.},
  author       = {Peuster, Manuel and Karl, Holger},
  booktitle    = {IEEE Conference on Network Function Virtualisation and Software Defined Networks (NFV-SDN)},
  location     = {Berlin},
  title        = {Profile Your Chains, Not Functions. Automated Network Service Profiling in {DevOps} Environments},
  doi          = {10.1109/NFV-SDN.2017.8169826},
  year         = {2017},
}

@inproceedings{99,
  author       = {Wehrheim, Heike},
  booktitle    = {Proceedings of the 14th International Conference on Formal Aspects of Component Software (FACS)},
  title        = {Fault localization in service compositions},
  year         = {2017},
}

@misc{50,
  author       = {Hamdeev, Erna},
  publisher    = {Universität Paderborn},
  title        = {{Verzerrte Online-Produktbewertungen - Literaturüberblick}},
  year         = {2017},
}

@misc{5084,
  author       = {Streck, Thomas},
  publisher    = {Universität Paderborn},
  title        = {How to adequately relocate asylum applicants within the {European Union} - An attempt to apply matching theory in the current migration crisis},
  year         = {2017},
}

