@inproceedings{32410,
  abstract     = {{Static analysis tools support developers in detecting potential coding issues, such as bugs or vulnerabilities. Research on static analysis emphasizes its technical challenges but also mentions severe usability shortcomings. These shortcomings hinder the adoption of static analysis tools, and in some cases, user dissatisfaction even leads to tool abandonment.
To comprehensively assess the current state of the art, this paper presents the first systematic usability evaluation in a wide range of static analysis tools. We derived a set of 36 relevant criteria from the scientific literature and gathered a collection of 46 static analysis tools complying with our inclusion and exclusion criteria - a representative set of mainly non-proprietary tools. Then, we evaluated how well these tools fulfill the aforementioned criteria.
The evaluation shows that more than half of the considered tools offer poor warning messages, while about three-quarters of the tools provide hardly any fix support. Furthermore, the integration of user knowledge is strongly neglected, which could be used for improved handling of false positives and tuning the results for the corresponding developer. Finally, issues regarding workflow integration and specialized user interfaces are proved further.
These findings should prove useful in guiding and focusing further research and development in the area of user experience for static code analyses.}},
  author       = {{Nachtigall, Marcus and Schlichtig, Michael and Bodden, Eric}},
  booktitle    = {{Proceedings of the 31st ACM SIGSOFT International Symposium on Software Testing and Analysis}},
  isbn         = {{9781450393799}},
  keywords     = {{Automated static analysis, Software usability}},
  pages        = {{532--543}},
  publisher    = {{ACM}},
  title        = {{{A Large-Scale Study of Usability Criteria Addressed by Static Analysis Tools}}},
  doi          = {{10.1145/3533767}},
  year         = {{2022}},
}

@article{31071,
  abstract     = {{Distributed, software-intensive systems (e.g., in the automotive sector) must fulfill communication requirements under hard real-time constraints.  The requirements have to be documented and validated carefully using a systematic requirements engineering (RE) approach, for example, by applying scenario-based requirements notations. The resources of the execution platforms and their properties (e.g., CPU frequency or bus throughput) induce effects on the timing behavior, which may lead to violations of the real-time requirements. Nowadays, the platform properties and their induced timing effects are verified against the real-time requirements by means of timing analysis techniques mostly implemented in commercial-off-the-shelf tools. However, such timing analyses are conducted in late development phases since they rely on artifacts produced during these phases (e.g., the platform-specific code). In order to enable early timing analyses already during RE, we extend a scenario-based requirements notation with allocation means to platform models and define operational semantics for the purpose of simulation-based, platform-aware timing analyses. We illustrate and evaluate the approach with an automotive software-intensive system.}},
  author       = {{Holtmann, Jörg and Deantoni, Julien and Fockel, Markus}},
  issn         = {{1619-1366}},
  journal      = {{Software and Systems Modeling}},
  keywords     = {{Modeling and Simulation, Software}},
  publisher    = {{Springer Science and Business Media LLC}},
  title        = {{{Early timing analysis based on scenario requirements and platform models}}},
  doi          = {{10.1007/s10270-022-01002-3}},
  year         = {{2022}},
}

@article{33835,
  abstract     = {{Nowadays, an increasing number of applications uses deserialization. This technique, based on rebuilding the instance of objects from serialized byte streams, can be dangerous since it can open the application to attacks such as remote code execution (RCE) if the data to deserialize is originating from an untrusted source. Deserialization vulnerabilities are so critical that they are in OWASP’s list of top 10 security risks for web applications. This is mainly caused by faults in the development process of applications and by flaws in their dependencies, i.e., flaws in the libraries used by these applications. No previous work has studied deserialization attacks in-depth: How are they performed? How are weaknesses introduced and patched? And for how long are vulnerabilities present in the codebase? To yield a deeper understanding of this important kind of vulnerability, we perform two main analyses: one on attack gadgets, i.e., exploitable pieces of code, present in Java libraries, and one on vulnerabilities present in Java applications. For the first analysis, we conduct an exploratory large-scale study by running 256 515 experiments in which we vary the versions of libraries for each of the 19 publicly available exploits. Such attacks rely on a combination of gadgets present in one or multiple Java libraries. A gadget is a method which is using objects or fields that can be attacker-controlled. Our goal is to precisely identify library versions containing gadgets and to understand how gadgets have been introduced and how they have been patched. We observe that the modification of one innocent-looking detail in a class – such as making it public – can already introduce a gadget. Furthermore, we noticed that among the studied libraries, 37.5\% are not patched, leaving gadgets available for future attacks.
For the second analysis, we manually analyze 104 deserialization vulnerabilities CVEs to understand how vulnerabilities are introduced and patched in real-life Java applications. Results indicate that the vulnerabilities are not always completely patched or that a workaround solution is proposed. With a workaround solution, applications are still vulnerable since the code itself is unchanged.}},
  author       = {{Sayar, Imen and Bartel, Alexandre and Bodden, Eric and Le Traon, Yves}},
  issn         = {{1049-331X}},
  journal      = {{ACM Transactions on Software Engineering and Methodology}},
  keywords     = {{Software}},
  publisher    = {{Association for Computing Machinery (ACM)}},
  title        = {{{An In-depth Study of Java Deserialization Remote-Code Execution Exploits and Vulnerabilities}}},
  doi          = {{10.1145/3554732}},
  year         = {{2022}},
}

@article{34414,
  abstract     = {{Given a steadily increasing demand on multi-material lightweight designs, fast and cost-efficient production technologies, such as the mechanical joining process clinching, are becoming more and more relevant for series production. Since the application of such joining techniques often base on the ability to reach similar or even better joint loading capacities compared to established joining processes (e.g., spot welding), few contributions investigated the systematic improvement of clinch joint characteristics. In this regard, the use of data-driven methods in combination with optimization algorithms showed already high potentials for the analysis of individual joints and the definition of optimal tool configurations. However, the often missing consideration of uncertainties, such as varying material properties, and the related calculation of their impact on clinch joint properties can lead to poor estimation results and thus to a decreased reliability of the entire joint connection. This can cause major challenges, especially for the design and dimensioning of safety-relevant components, such as in car bodies. Motivated by this, the presented contribution introduces a novel method for the robust estimation of clinch joint characteristics including uncertainties of varying and versatile process chains in mechanical joining. Therefore, the utilization of Gaussian process regression models is demonstrated and evaluated regarding the ability to achieve sufficient prediction qualities.}},
  author       = {{Zirngibl, Christoph and Schleich, Benjamin and Wartzack, Sandro}},
  issn         = {{0268-3768}},
  journal      = {{The International Journal of Advanced Manufacturing Technology}},
  keywords     = {{Industrial and Manufacturing Engineering, Computer Science Applications, Mechanical Engineering, Software, Control and Systems Engineering}},
  publisher    = {{Springer Science and Business Media LLC}},
  title        = {{{Robust estimation of clinch joint characteristics based on data-driven methods}}},
  doi          = {{10.1007/s00170-022-10441-7}},
  year         = {{2022}},
}

@article{45847,
  abstract     = {{In this paper, we investigate the parameterized complexity of model checking for Dependence and Independence logic, which are well studied logics in the area of Team Semantics. We start with a list of nine immediate parameterizations for this problem, namely the number of disjunctions (i.e. splits)/(free) variables/universal quantifiers, formula-size, the tree-width of the Gaifman graph of the input structure, the size of the universe/team and the arity of dependence atoms. We present a comprehensive picture of the parameterized complexity of model checking and obtain a division of the problem into tractable and various intractable degrees. Furthermore, we also consider the complexity of the most important variants (data and expression complexity) of the model checking problem by fixing parts of the input.}},
  author       = {{Kontinen, Juha and Meier, Arne and Mahmood, Yasir}},
  issn         = {{0955-792X}},
  journal      = {{Journal of Logic and Computation}},
  keywords     = {{Logic, Hardware and Architecture, Arts and Humanities (miscellaneous), Software, Theoretical Computer Science}},
  number       = {{8}},
  pages        = {{1624--1644}},
  publisher    = {{Oxford University Press (OUP)}},
  title        = {{{A parameterized view on the complexity of dependence and independence logic}}},
  doi          = {{10.1093/logcom/exac070}},
  volume       = {{32}},
  year         = {{2022}},
}

@inproceedings{32309,
  abstract     = {{Due to the increasing influences of a VUCA world, design thinking workshops have been established as a standard technique to build solutions according to uncertain customer needs. Concerning the ongoing pandemic and rising development of solutions across organizations, more and more workshops were conducted online with software support. However, existing software tools insufficiently address the different workshop situations in terms of the process (i.e., fixed tasks to conduct), the place (e.g., static online whiteboards), and people (i.e., synchronous working of all stakeholders).
Therefore, we propose a design science study to develop a situation-specific software support that can be configured with flexible development processes, different places, and task-related people. Based on practical experience in existing research projects, we derive the initial design requirements and map them to a set of design principles. Out of that, we design a concept with its implementation as a software tool and point out open challenges. }},
  author       = {{Gottschalk, Sebastian and Yigitbas, Enes and Nowosad, Alexander and Engels, Gregor}},
  booktitle    = {{Proceedings of the 5th International Workshop on Software-intensive Business (IWSiB'22)}},
  keywords     = {{design thinking, situation-specific, cross-organizational, software support}},
  publisher    = {{ACM}},
  title        = {{{Towards Situation-specific Software Support for Cross-organizational Design Thinking Processes}}},
  year         = {{2022}},
}

@article{33684,
  author       = {{Schade, Robert and Kenter, Tobias and Elgabarty, Hossam and Lass, Michael and Schütt, Ole and Lazzaro, Alfio and Pabst, Hans and Mohr, Stephan and Hutter, Jürg and Kühne, Thomas and Plessl, Christian}},
  issn         = {{0167-8191}},
  journal      = {{Parallel Computing}},
  keywords     = {{Artificial Intelligence, Computer Graphics and Computer-Aided Design, Computer Networks and Communications, Hardware and Architecture, Theoretical Computer Science, Software}},
  publisher    = {{Elsevier BV}},
  title        = {{{Towards electronic structure-based ab-initio molecular dynamics simulations with hundreds of millions of atoms}}},
  doi          = {{10.1016/j.parco.2022.102920}},
  volume       = {{111}},
  year         = {{2022}},
}

@article{30511,
  abstract     = {{Many critical codebases are written in C, and most of them use preprocessor directives to encode variability, effectively encoding software product lines. These preprocessor directives, however, challenge any static code analysis. SPLlift, a previously presented approach for analyzing software product lines, is limited to Java programs that use a rather simple feature encoding and to analysis problems with a finite and ideally small domain. Other approaches that allow the analysis of real-world C software product lines use special-purpose analyses, preventing the reuse of existing analysis infrastructures and ignoring the progress made by the static analysis community. This work presents VarAlyzer, a novel static analysis approach for software product lines. VarAlyzer first transforms preprocessor constructs to plain C while preserving their variability and semantics. It then solves any given distributive analysis problem on transformed product lines in a variability-aware manner. VarAlyzer’s analysis results are annotated with feature constraints that encode in which configurations each result holds. Our experiments with 95 compilation units of OpenSSL show that applying VarAlyzer enables one to conduct inter-procedural, flow-, field- and context-sensitive data-flow analyses on entire product lines for the first time, outperforming the product-based approach for highly-configurable systems.}},
  author       = {{Schubert, Philipp and Gazzillo, Paul and Patterson, Zach and Braha, Julian and Schiebel, Fabian Benedikt and Hermann, Ben and Wei, Shiyi and Bodden, Eric}},
  issn         = {{0928-8910}},
  journal      = {{Automated Software Engineering}},
  keywords     = {{inter-procedural static analysis, software product lines, preprocessor, LLVM, C/C++}},
  number       = {{1}},
  publisher    = {{Springer Science and Business Media LLC}},
  title        = {{{Static data-flow analysis for software product lines in C}}},
  doi          = {{10.1007/s10515-022-00333-1}},
  volume       = {{29}},
  year         = {{2022}},
}

@inproceedings{26049,
  abstract     = {{Content is the new oil. Users consume billions of terabytes a day while surfing on news sites or blogs, posting on social media sites, and sending chat messages around the globe. While content is heterogeneous, the dominant form of web content is text. There are situations where more diversity needs to be introduced into text content, for example, to reuse it on websites or to allow a chatbot to base its models on the information conveyed rather than of the language used. In order to achieve this, paraphrasing techniques have been developed: One example is Text spinning, a technique that automatically paraphrases text while leaving the intent intact. This makes it easier to reuse content, or to change the language generated by the bot more human. One method for modifying texts is a combination of translation and back-translation. This paper presents NATTS, a naive approach that uses transformer-based translation models to create diversified text, combining translation steps in one model. An advantage of this approach is that it can be fine-tuned and handle technical language.}},
  author       = {{Bäumer, Frederik Simon and Kersting, Joschka and Denisov, Sergej and Geierhos, Michaela}},
  booktitle    = {{PROCEEDINGS OF THE INTERNATIONAL CONFERENCES ON WWW/INTERNET 2021 AND APPLIED COMPUTING 2021}},
  keywords     = {{Software Requirements, Natural Language Processing, Transfer Learning, On-The-Fly Computing}},
  location     = {{Lisbon, Portugal}},
  pages        = {{221--225}},
  publisher    = {{IADIS}},
  title        = {{{In Other Words: A Naive Approach to Text Spinning}}},
  year         = {{2021}},
}

@article{30907,
  author       = {{Rodriguez, Alfonso and Otero, Andres and Platzner, Marco and De la Torre, Eduardo}},
  issn         = {{0018-9340}},
  journal      = {{IEEE Transactions on Computers}},
  keywords     = {{Computational Theory and Mathematics, Hardware and Architecture, Theoretical Computer Science, Software}},
  pages        = {{1--1}},
  publisher    = {{Institute of Electrical and Electronics Engineers (IEEE)}},
  title        = {{{Exploiting Hardware-Based Data-Parallel and Multithreading Models for Smart Edge Computing in Reconfigurable FPGAs}}},
  doi          = {{10.1109/tc.2021.3107196}},
  year         = {{2021}},
}

@article{45844,
  abstract     = {{Abductive reasoning is a non-monotonic formalism stemming from the work of Peirce. It describes the process of deriving the most plausible explanations of known facts. Considering the positive version, asking for sets of variables as explanations, we study, besides the problem of whether there exists a set of explanations, two explanation size limited variants of this reasoning problem (less than or equal to, and equal to a given size bound). In this paper, we present a thorough two-dimensional classification of these problems: the first dimension is regarding the parameterized complexity under a wealth of different parameterizations, and the second dimension spans through all possible Boolean fragments of these problems in Schaefer’s constraint satisfaction framework with co-clones (T. J. Schaefer. The complexity of satisfiability problems. In Proceedings of the 10th Annual ACM Symposium on Theory of Computing, May 1–3, 1978, San Diego, California, USA, R.J. Lipton, W.A. Burkhard, W.J. Savitch, E.P. Friedman, A.V. Aho eds, pp. 216–226. ACM, 1978). Thereby, we almost complete the parameterized complexity classification program initiated by Fellows et al. (The parameterized complexity of abduction. In Proceedings of the Twenty-Sixth AAAI Conference on Artificial Intelligence, July 22–26, 2012, Toronto, Ontario, Canada, J. Hoffmann, B. Selman eds. AAAI Press, 2012), partially building on the results by Nordh and Zanuttini (What makes propositional abduction tractable. Artificial Intelligence, 172, 1245–1284, 2008). In this process, we outline a fine-grained analysis of the inherent parameterized intractability of these problems and pinpoint their FPT parts. As the standard algebraic approach is not applicable to our problems, we develop an alternative method that makes the algebraic tools partially available again.}},
  author       = {{Mahmood, Yasir and Meier, Arne and Schmidt, Johannes}},
  issn         = {{0955-792X}},
  journal      = {{Journal of Logic and Computation}},
  keywords     = {{Logic, Hardware and Architecture, Arts and Humanities (miscellaneous), Software, Theoretical Computer Science}},
  number       = {{1}},
  pages        = {{266--296}},
  publisher    = {{Oxford University Press (OUP)}},
  title        = {{{Parameterized complexity of abduction in Schaefer’s framework}}},
  doi          = {{10.1093/logcom/exaa079}},
  volume       = {{31}},
  year         = {{2021}},
}

@article{37146,
  abstract     = {{Organizations increasingly introduce collaborative technologies in form of virtual assistants (VAs) to save valuable resources, especially when employees are assisted with work-related tasks. However, the effect of VAs on virtual teams and collaboration remains uncertain, particularly whether employees show social loafing (SL) tendencies, i.e., applying less effort for collective tasks compared to working alone. While extant research indicates that VAs collaboratively working in teams exert greater results, less is known about SL in virtual collaboration and how responsibility attribution alters. An online experiment with N = 102 was conducted in which participants were assisted by a VA in solving a task. The results indicate SL tendencies in virtual collaboration with VAs and that participants tend to cede responsibility to the VA. This study makes a first foray and extends the information systems (IS) literature by analyzing SL and responsibility attribution thus updates our knowledge on virtual collaboration with VAs.}},
  author       = {{Stieglitz, Stefan and Mirbabaie, Milad and Möllmann, Nicholas R. J. and Rzyski, Jannik}},
  issn         = {{1387-3326}},
  journal      = {{Information Systems Frontiers}},
  keywords     = {{Computer Networks and Communications, Information Systems, Theoretical Computer Science, Software}},
  number       = {{3}},
  pages        = {{745--770}},
  publisher    = {{Springer Science and Business Media LLC}},
  title        = {{{Collaborating with Virtual Assistants in Organizations: Analyzing Social Loafing Tendencies and Responsibility Attribution}}},
  doi          = {{10.1007/s10796-021-10201-0}},
  volume       = {{24}},
  year         = {{2021}},
}

@article{27841,
  abstract     = {{Verification of software and processor hardware usually proceeds separately, software analysis relying on the correctness of processors executing machine instructions. This assumption is valid as long as the software runs on standard CPUs that have been extensively validated and are in wide use. However, for processors exploiting custom instruction set extensions to meet performance and energy constraints the validation might be less extensive, challenging the correctness assumption. In this paper we present a novel formal approach for hardware/software co-verification targeting processors with custom instruction set extensions. We detail two different approaches for checking whether the hardware fulfills the requirements expected by the software analysis. The approaches are designed to explore a trade-off between generality of the verification and computational effort. Then, we describe the integration of software and hardware analyses for both techniques and describe a fully automated tool chain implementing the approaches. Finally, we demonstrate and compare the two approaches on example source code with custom instructions, using state-of-the-art software analysis and hardware verification techniques.}},
  author       = {{Jakobs, Marie-Christine and Pauck, Felix and Platzner, Marco and Wehrheim, Heike and Wiersema, Tobias}},
  journal      = {{IEEE Access}},
  keywords     = {{Software Analysis, Abstract Interpretation, Custom Instruction, Hardware Verification}},
  publisher    = {{IEEE}},
  title        = {{{Software/Hardware Co-Verification for Custom Instruction Set Processors}}},
  doi          = {{10.1109/ACCESS.2021.3131213}},
  year         = {{2021}},
}

@article{31132,
  author       = {{Dann, Andreas Peter and Plate, Henrik and Hermann, Ben and Ponta, Serena Elisa and Bodden, Eric}},
  issn         = {{0098-5589}},
  journal      = {{IEEE Transactions on Software Engineering}},
  keywords     = {{Software}},
  pages        = {{1--1}},
  publisher    = {{Institute of Electrical and Electronics Engineers (IEEE)}},
  title        = {{{Identifying Challenges for OSS Vulnerability Scanners - A Study \& Test Suite}}},
  doi          = {{10.1109/tse.2021.3101739}},
  year         = {{2021}},
}

@inproceedings{18686,
  author       = {{Kersting, Joschka and Bäumer, Frederik Simon}},
  booktitle    = {{PROCEEDINGS OF THE INTERNATIONAL CONFERENCE ON APPLIED COMPUTING 2020}},
  keywords     = {{Software Requirements, Natural Language Processing, Transfer Learning, On-The-Fly Computing}},
  location     = {{Lisbon, Portugal}},
  pages        = {{119--123}},
  publisher    = {{IADIS}},
  title        = {{{Semantic Tagging of Requirement Descriptions: A Transformer-Based Approach}}},
  year         = {{2020}},
}

@inproceedings{18249,
  abstract     = {{Augmented Reality (AR) has recently found high attention in mobile shopping apps such as in domains like furniture or decoration. Here, the developers of the apps focus on the positioning of atomic 3D objects in the physical environment. With this focus, they neglect the configuration of multi-faceted 3D object composition according to the user needs and environmental constraints. To tackle these challenges, we present a model-based approach to support AR-assisted product configuration based on the concept of Dynamic Software Product Lines. Our approach splits products (e.g. table) into parts (e.g. tabletop, table legs, funnier) with their 3D objects and additional information (e.g. name, price). The possible products, which can be configured out of these parts, are stored in a feature model. At runtime, this feature model can be used to configure 3D object compositions out of the product parts and adapt to user needs and environmental constraints. The benefits of this approach are demonstrated by a case study of configuring modular kitchens with the help of a prototypical mobile-based implementation.}},
  author       = {{Gottschalk, Sebastian and Yigitbas, Enes and Schmidt, Eugen and Engels, Gregor}},
  booktitle    = {{Human-Centered Software Engineering. HCSE 2020}},
  editor       = {{Bernhaupt, Regina and Ardito, Carmelo and Sauer, Stefan}},
  keywords     = {{Product Configuration, Augmented Reality, Runtime Adaptation, Dynamic Software Product Lines}},
  location     = {{Eindhoven}},
  publisher    = {{Springer}},
  title        = {{{Model-based Product Configuration in Augmented Reality Applications}}},
  doi          = {{10.1007/978-3-030-64266-2_5}},
  volume       = {{12481}},
  year         = {{2020}},
}

@inproceedings{16933,
  abstract     = {{The continuous innovation of its business models is an important task for a company to stay competitive. During this process, the company has to validate various hypotheses about its business models by adapting to uncertain and changing customer needs effectively and efficiently. This adaptation, in turn, can be supported by the concept of Software Product Lines (SPLs). SPLs reduce the time to market by deriving products for customers with changing requirements using a common set of features, structured as a feature model. Analogously, we support the process of business model adaptation by applying the engineering process of SPLs to the structure of the Business Model Canvas (BMC). We call this concept a Business Model Decision Line (BMDL). The BMDL matches business domain knowledge in the form of a feature model with customer needs to derive hypotheses about the business model together with experiments for validation. Our approach is effective by providing a comprehensive overview of possible business model adaptations and efficient by reusing experiments for different hypotheses. We implement our approach in a tool and illustrate the usefulness with an example of developing business models for a mobile application.}},
  author       = {{Gottschalk, Sebastian and Rittmeier, Florian and Engels, Gregor}},
  booktitle    = {{Proceedings of the 22nd IEEE International Conference on Business Informatics}},
  keywords     = {{Business Model Decision Line, Business Model Adaptation, Hypothesis-driven Adaptation, Software Product Line, Feature Model}},
  location     = {{Antwerp}},
  publisher    = {{IEEE}},
  title        = {{{Hypothesis-driven Adaptation of Business Models based on Product Line Engineering}}},
  doi          = {{10.1109/CBI49978.2020.00022}},
  year         = {{2020}},
}

@inproceedings{9275,
  abstract     = {{In the last years, store-oriented software ecosystems are gaining
more and more attention from a business perspective. In these ecosystems,
third-party developers upload extensions to a store which can be
downloaded by end users. While the functional scope of such ecosystems
is relatively similar, the underlying business models differ greatly in and
between their different product domains (e.g. Mobile Phone, Smart TV).
This variability, in turn, makes it challenging for store providers to 
find a business model that fits their own needs.
To handle this variability, we introduce the Business Variability Model
(BVM) for modeling business model decisions. The basis of these decisions
is the analysis of 60 store-oriented software ecosystems in eight
different product domains. We map their business model decisions to the
Business Model Canvas, condense them to a variability model and discuss
particular variants and their dependencies. Our work provides store
providers a new approach for modeling business model decisions together
with insights of existing business models. This, in turn, supports them
in creating new and improving existing business models.}},
  author       = {{Gottschalk, Sebastian and Rittmeier, Florian and Engels, Gregor}},
  booktitle    = {{Business Modeling and Software Design}},
  editor       = {{Shishkov, Boris}},
  keywords     = {{Software Ecosystems, Business Models, Variabilities}},
  location     = {{Lisbon}},
  pages        = {{153--169}},
  publisher    = {{Springer International Publishing}},
  title        = {{{Business Models of Store-Oriented Software Ecosystems: A Variability Modeling Approach}}},
  doi          = {{10.1007/978-3-030-24854-3_10}},
  year         = {{2019}},
}

@article{8424,
  abstract     = {{The vision of On-the-Fly (OTF) Computing is to compose and provide software services ad hoc, based on requirement descriptions in natural language. Since non-technical users write their software requirements themselves and in unrestricted natural language, deficits occur such as inaccuracy and incompleteness. These deficits are usually met by natural language processing methods, which have to face special challenges in OTF Computing because maximum automation is the goal. In this paper, we present current automatic approaches for solving inaccuracies and incompletenesses in natural language requirement descriptions and elaborate open challenges. In particular, we will discuss the necessity of domain-specific resources and show why, despite far-reaching automation, an intelligent and guided integration of end users into the compensation process is required. In this context, we present our idea of a chat bot that integrates users into the compensation process depending on the given circumstances. }},
  author       = {{Bäumer, Frederik Simon and Kersting, Joschka and Geierhos, Michaela}},
  issn         = {{2073-431X}},
  journal      = {{Computers}},
  keywords     = {{Inaccuracy Detection, Natural Language Software Requirements, Chat Bot}},
  location     = {{Vilnius, Lithuania}},
  number       = {{1}},
  publisher    = {{MDPI AG, Basel, Switzerland}},
  title        = {{{Natural Language Processing in OTF Computing: Challenges and the Need for Interactive Approaches}}},
  doi          = {{10.3390/computers8010022}},
  volume       = {{8}},
  year         = {{2019}},
}

@inproceedings{9809,
  abstract     = {{Remarkable advantages of Containers (CNs) over Virtual Machines (VMs) such as lower overhead and faster startup have gained the attention of Communication Service Providers (CSPs) as using CNs for providing Virtual Network Functions (VNFs) can save costs while increasing the service agility. However, as it is not feasible to realise all types of VNFs in CNs, the coexistence of VMs and CNs is proposed. To put VMs and CNs together, an orchestration framework that can chain services across distributed and heterogeneous domains is required. To this end, we implemented a framework by extending and consolidating state-of-the-art tools and technologies originated from Network Function Virtualization (NFV), Software-defined Networking (SDN) and cloud computing environments. This framework chains services provisioned across Kubernetes and OpenStack domains. During the demo, we deploy a service consisting of CN- and VM-based VNFs to demonstrate different features provided by our framework.}},
  author       = {{Razzaghi Kouchaksaraei, Hadi and Karl, Holger}},
  booktitle    = {{13th ACM International Conference on Distributed and Event-based Systems}},
  keywords     = {{Network Function Virtualization, Software-defined Networking, Cloud Computing, service orchestration, OpenStack, Kubernetes}},
  location     = {{Darmstadt}},
  title        = {{{Service Function Chaining Across OpenStack and Kubernetes Domains}}},
  doi          = {{10.1145/3328905.3332505}},
  year         = {{2019}},
}

