@inproceedings{2322,
  abstract     = {{The vision of On-The-Fly Computing is an automatic composition
of existing software services. Based on natural language software
descriptions, end users will receive compositions tailored to their needs.
For this reason, the quality of the initial software service description
strongly determines whether a software composition really meets the expectations
of end users. In this paper, we expose open NLP challenges
needed to be faced for service composition in On-The-Fly Computing.}},
  author       = {{Bäumer, Frederik Simon and Geierhos, Michaela}},
  booktitle    = {{Proceedings of the 23rd International Conference on Natural Language and Information Systems}},
  editor       = {{Silberztein, Max and Atigui, Faten and Kornyshova, Elena and Métais, Elisabeth and Meziane, Farid}},
  isbn         = {{978-3-319-91946-1}},
  keywords     = {{Requirements Extraction, Temporal Reordering of Software Functions, Inaccuracy Compensation}},
  location     = {{Paris, France}},
  pages        = {{509--513}},
  publisher    = {{Springer}},
  title        = {{{How to Deal with Inaccurate Service Descriptions in On-The-Fly Computing: Open Challenges}}},
  doi          = {{10.1007/978-3-319-91947-8_53}},
  volume       = {{10859}},
  year         = {{2018}},
}

@article{2331,
  abstract     = {{A user generally writes software requirements in ambiguous and incomplete form by using natural language; therefore, a software developer may have difficulty in clearly understanding what the meanings are. To solve this problem with automation, we propose a classifier for semantic annotation with manually pre-defined semantic categories. To improve our classifier, we carefully designed syntactic features extracted by constituency and dependency parsers. Even with a small dataset and a large number of classes, our proposed classifier records an accuracy of 0.75, which outperforms the previous model, REaCT.}},
  author       = {{Kim, Yeongsu and Lee, Seungwoo and Dollmann, Markus and Geierhos, Michaela}},
  issn         = {{2207-6360}},
  journal      = {{International Journal of Advanced Science and Technology}},
  keywords     = {{Software Engineering, Natural Language Processing, Semantic Annotation, Machine Learning, Feature Engineering, Syntactic Structure}},
  pages        = {{123--136}},
  publisher    = {{SERSC Australia}},
  title        = {{{Improving Classifiers for Semantic Annotation of Software Requirements with Elaborate Syntactic Structure}}},
  doi          = {{10.14257/ijast.2018.112.12}},
  volume       = {{112}},
  year         = {{2018}},
}

@inproceedings{4339,
  abstract     = {{On-The-Fly Computing is the vision of covering software needs of end users by fully-automatic compositions of existing software services. End users will receive so-called service compositions tailored to their very individual needs, based on natural language software descriptions. This everyday language may contain inaccuracies and incompleteness, which are well-known challenges in requirements engineering. In addition to existing approaches that try to automatically identify and correct these deficits, there are also new trends to involve users more in the elaboration and refinement process. In this paper, we present the relevant state of the art in the field of automated detection and compensation of multiple inaccuracies in natural language service descriptions and name open challenges needed to be tackled in NL-based software service composition.}},
  author       = {{Bäumer, Frederik Simon and Geierhos, Michaela}},
  booktitle    = {{Proceedings of the 24th International Conference on Information and Software Technologies (ICIST 2018)}},
  editor       = {{Damaševičius, Robertas and Vasiljevienė, Giedrė}},
  isbn         = {{978-3-319-99971-5}},
  issn         = {{1865-0929}},
  keywords     = {{Inaccuracy detection, Natural language software requirements}},
  location     = {{Vilnius, Lithuania}},
  pages        = {{559--570}},
  publisher    = {{Springer}},
  title        = {{{NLP in OTF Computing: Current Approaches and Open Challenges}}},
  doi          = {{10.1007/978-3-319-99972-2_46}},
  volume       = {{920}},
  year         = {{2018}},
}

@inproceedings{44,
  abstract     = {{Natural language software requirements descriptions enable end users to formulate their wishes and expectations for a future software product without much prior knowledge in requirements engineering. However, these descriptions are susceptible to linguistic inaccuracies such as ambiguities and incompleteness that can harm the development process. There is a number of software solutions that can detect deficits in requirements descriptions and partially solve them, but they are often hard to use and not suitable for end users. For this reason, we develop a software system that helps end-users to create unambiguous and complete requirements descriptions by combining existing expert tools and controlling them using automatic compensation strategies. In order to recognize the necessity of individual compensation methods in the descriptions, we have developed linguistic indicators, which we present in this paper. Based on these indicators, the whole text analysis pipeline is ad-hoc configured and thus adapted to the individual circumstances of a requirements description.}},
  author       = {{Bäumer, Frederik Simon and Geierhos, Michaela}},
  booktitle    = {{Proceedings of the 51st Hawaii International Conference on System Sciences}},
  isbn         = {{978-0-9981331-1-9}},
  keywords     = {{Software Product Lines: Engineering, Services, and Management, Ambiguities, Incompleteness, Natural Language Processing, Software Requirements}},
  location     = {{Big Island, Waikoloa Village}},
  pages        = {{5746--5755}},
  title        = {{{Flexible Ambiguity Resolution and Incompleteness Detection in Requirements Descriptions via an Indicator-based Configuration of Text Analysis Pipelines}}},
  url          = {{https://hdl.handle.net/10125/50609}},
  year         = {{2018}},
}

@inbook{1104,
  author       = {{Geierhos, Michaela}},
  title        = {{{Unschärfe bei der Interpretation natürlichsprachlicher Anforderungsbeschreibungen}}},
  booktitle    = {{Unschärfe - Der Umgang mit fehlender Eindeutigkeit}},
  editor       = {{Freitag, Steffen and Geierhos, Michaela and Asmani, Rozbeh and Haug, Judith I.}},
  publisher    = {{Ferdinand Schöningh}},
  pages        = {{111--128}},
  isbn         = {{978-3-506-78896-2}},
  abstract     = {{Präzision ist kein Zufall. Sie wird vom Menschen herbeigeführt, indem Übereinstimmung mit einem Standard oder einem akzeptierten Wert angestrebt wird oder die Reproduzierbarkeit von Experimenten möglichst hoch sein muss. Was aber tun, wenn Präzision mangels verfügbarer Informationen nicht hergestellt werden kann? Wie gehen Wissenschaft und Kunst dann mit dieser fehlenden Eindeutigkeit um? Die Autorinnen und Autoren dieses Sammelbandes beleuchten aus der Perspektive ihrer jeweiligen Fachdisziplin die Chancen bei der Berücksichtigung von Unschärfe(n) in ihrer Forschung und Kunst. Denn Unschärfe ist Realität. }},
  year         = {{2018}},
}

@inproceedings{1181,
  abstract     = {{The main idea in On-The-Fly Computing is to automatically compose existing software services according to the wishes of end-users. However, since user requirements are often ambiguous, vague and incomplete, the selection and composition of suitable software services is a challenging task. In this paper, we present our current approach to improve requirement descriptions before they are used for software composition. This procedure is fully automated, but also has limitations, for example, if necessary information is missing. In addition, and in response to the limitations, we provide insights into our above-mentioned current work that combines the existing optimization approach with a chatbot solution.}},
  author       = {{Bäumer, Frederik Simon and Geierhos, Michaela}},
  booktitle    = {{Joint Proceedings of REFSQ-2018 Workshops, Doctoral Symposium, Live Studies Track, and Poster Track co-located with the 23rd International Conference on Requirements Engineering: Foundation for Software Quality (REFSQ 2018)}},
  editor       = {{Schmid, Klaus and Spoletini, Paola and Ben Charrada, Eya and Chisik, Yoram and Dalpiaz, Fabiano and Ferrari, Alessio and Forbrig, Peter and Franch, Xavier and Kirikova, Marite and Madhavji, Nazim and Palomares, Cristina and Ralyté, Jolita and Sabetzadeh, Mehrdad and Sawyer, Pete and van der Linden, Dirk and Zamansky, Anna}},
  issn         = {{1613-0073}},
  location     = {{Utrecht, The Netherlands}},
  publisher    = {{CEUR-WS.org}},
  title        = {{{How to Deal with Inaccurate Service Requirements? Insights in Our Current Approach and New Ideas}}},
  volume       = {{2075}},
  year         = {{2018}},
}

@inproceedings{1182,
  abstract     = {{Natural language requirement descriptions are often unstructured, contradictory and incomplete and are therefore challenging for automatic processing. Although many of these deficits can be compensated by means of Natural Language Processing, there still remain cases where interaction with end-users is necessary for clarification. In this paper, we present our idea of using chatbot technology to establish end-user communication in order to support the automatic compensation of some deficits in natural language requirement descriptions.}},
  author       = {{Friesen, Edwin and Bäumer, Frederik Simon and Geierhos, Michaela}},
  booktitle    = {{Joint Proceedings of REFSQ-2018 Workshops, Doctoral Symposium, Live Studies Track, and Poster Track co-located with the 23rd International Conference on Requirements Engineering: Foundation for Software Quality (REFSQ 2018)}},
  editor       = {{Schmid, Klaus and Spoletini, Paola and Ben Charrada, Eya and Chisik, Yoram and Dalpiaz, Fabiano and Ferrari, Alessio and Forbrig, Peter and Franch, Xavier and Kirikova, Marite and Madhavji, Nazim and Palomares, Cristina and Ralyté, Jolita and Sabetzadeh, Mehrdad and Sawyer, Pete and van der Linden, Dirk and Zamansky, Anna}},
  issn         = {{1613-0073}},
  location     = {{Utrecht, The Netherlands}},
  publisher    = {{CEUR-WS.org}},
  title        = {{{CORDULA: Software Requirements Extraction Utilizing Chatbot as Communication Interface}}},
  volume       = {{2075}},
  year         = {{2018}},
}

@inproceedings{1183,
  author       = {{Caron, Matthew and Bäumer, Frederik Simon and Geierhos, Michaela}},
  title        = {{{Back to Basics: Extracting Software Requirements with a Syntactic Approach}}},
  booktitle    = {{Joint Proceedings of REFSQ-2018 Workshops, Doctoral Symposium, Live Studies Track, and Poster Track co-located with the 23rd International Conference on Requirements Engineering: Foundation for Software Quality (REFSQ 2018)}},
  editor       = {{Schmid, Klaus and Spoletini, Paola and Ben Charrada, Eya and Chisik, Yoram and Dalpiaz, Fabiano and Ferrari, Alessio and Forbrig, Peter and Franch, Xavier and Kirikova, Marite and Madhavji, Nazim and Palomares, Cristina and Ralyté, Jolita and Sabetzadeh, Mehrdad and Sawyer, Pete and van der Linden, Dirk and Zamansky, Anna}},
  publisher    = {{CEUR-WS.org}},
  location     = {{Utrecht, The Netherlands}},
  volume       = {{2075}},
  issn         = {{1613-0073}},
  abstract     = {{As our world grows in complexity, companies and employees alike need, more than ever before, solutions tailored to their exact needs. Since such tools cannot always be purchased off-the-shelf and need to be designed from the ground up, developers rely on software requirements. In this paper, we present our vision of a syntactic rule-based extraction
tool for software requirements specification documents. In contrast to other methods, our tool will allow stakeholders to express their needs and wishes in unfiltered natural language, which we believe is essential for non-expert users.}},
  year         = {{2018}},
}

@inproceedings{11710,
  author       = {{Chen, Wei-Fan and Wachsmuth, Henning and Al Khatib, Khalid and Stein, Benno}},
  title        = {{{Learning to Flip the Bias of News Headlines}}},
  booktitle    = {{Proceedings of the 11th International Conference on Natural Language Generation}},
  publisher    = {{Association for Computational Linguistics}},
  pages        = {{79--88}},
  year         = {{2018}},
}

@inproceedings{14873,
  author       = {{Chen, Wei-Fan and Hagen, Matthias and Stein, Benno and Potthast, Martin}},
  booktitle    = {{Proceedings of the 41st International ACM SIGIR Conference on Research \& Development in Information Retrieval}},
  pages        = {{1033--1036}},
  title        = {{{A User Study on Snippet Generation: Text Reuse vs. Paraphrases}}},
  year         = {{2018}},
}

@inproceedings{14885,
  author       = {{Potthast, Martin and Chen, Wei-Fan and Hagen, Matthias and Stein, Benno}},
  booktitle    = {{Proceedings of the Second International Workshop on Recent Trends in News Information Retrieval}},
  pages        = {{3--5}},
  title        = {{{A Plan for Ancillary Copyright: Original Snippets}}},
  year         = {{2018}},
}

@inbook{93,
  abstract     = {{In recent years, there has been a proliferation of technological developments that incorporate processing of human language. Hardware and software can be specialized for designated subject areas, and computational devices are designed for a widening variety of applications. At the same time, new areas and applications are emerging by demanding intelligent technology enhanced by the processing of human language. These new applications often perform tasks which handle information, and they have a capacity to reason, using both formal and human language. Many sub-areas of Artificial Intelligence demand integration of Natural Language Processing, at least to some degree. Furthermore, technologies require coverage of known as well as unknown agents, and tasks with potential variations. All of this takes place in environments with unknown factors.
The book covers theoretical work, advanced applications, approaches, and techniques for computational models of information, reasoning systems, and presentation in language. The book promotes work on intelligent natural language processing and related models of information, thought, reasoning, and other cognitive processes. The topics covered by the chapters prompt further research and developments of advanced systems in the areas of logic, computability, computational linguistics, cognitive science, neuroscience of language, robotics, and artificial intelligence, among others.}},
  author       = {{Geierhos, Michaela and Bäumer, Frederik Simon}},
  booktitle    = {{Partiality and Underspecification in Information, Languages, and Knowledge}},
  editor       = {{Christiansen, Henning and Jiménez-López, M. Dolores and Loukanova, Roussanka and Moss, Lawrence S.}},
  isbn         = {{978-1-4438-7947-7}},
  pages        = {{65--108}},
  publisher    = {{Cambridge Scholars Publishing}},
  title        = {{{Guesswork? Resolving Vagueness in User-Generated Software Requirements}}},
  year         = {{2017}},
}

@inproceedings{57,
  abstract     = {{Users prefer natural language software requirements because of their usability and accessibility. Many approaches exist to elaborate these requirements and to support the users during the elicitation process. But there is a lack of adequate resources, which are needed to train and evaluate approaches for requirement refinement. We are trying to close this gap by using online available software descriptions from SourceForge and app stores. Thus, we present two real-life requirements collections based on online-available software descriptions. Our goal is to show the domain-specific characteristics of content words describing functional requirements. On the one hand, we created a semantic role-labeled requirements set, which we use for requirements classification. On the other hand, we enriched software descriptions with linguistic features and dependencies to provide evidence for the context-awareness of software functionalities.}},
  author       = {{Bäumer, Frederik Simon and Dollmann, Markus and Geierhos, Michaela}},
  booktitle    = {{Proceedings of the 2nd ACM SIGSOFT International Workshop on App Market Analytics}},
  editor       = {{Sarro, Federica and Shihab, Emad and Nagappan, Meiyappan and Platenius, Marie Christin and Kaimann, Daniel}},
  isbn         = {{978-1-4503-5158-4}},
  location     = {{Paderborn, Germany}},
  pages        = {{19--25}},
  publisher    = {{ACM}},
  title        = {{{Studying Software Descriptions in SourceForge and App Stores for a better Understanding of real-life Requirements}}},
  doi          = {{10.1145/3121264.3121269}},
  year         = {{2017}},
}

@misc{46,
  author       = {{Grobbel, Florian}},
  publisher    = {{Universität Paderborn}},
  title        = {{{Was kommt zuerst? Erkennung von zeitlichen Abläufen in funktionalen Softwareanforderungsbeschreibungen}}},
  year         = {{2017}},
}

@misc{47,
  author       = {{Theda, Mona}},
  title        = {{{Was ist gemeint? Strukturell ambige Sätze als Herausforderung für Parsing-Ansätze}}},
  publisher    = {{Universität Paderborn}},
  year         = {{2017}},
}

@inproceedings{84,
  abstract     = {{The increasing popularity of paradigms like service-oriented computing and cloud computing is leading to a growing amount of service providers offering software components in the form of deployed, ready-to-use services (Software as a Service, SaaS) [14, 20]. In order to discover and select software services, intermediaries apply service matching approaches for determining whether the specification of a provided service satisfies the requester’s requirements. There are already lots of different service matching approaches considering different service properties (structural, behavioral, and non-functional properties). However, each of these approaches alone is not enough to provide a high matching result quality (e.g., accurate matching results) [BOR04]. Thus, such approaches should be combined into a more holistic approach leading to more accurate matching results. However, this combination is a manual, error-prone procedure where many design decisions are made. Furthermore, this procedure has to be repeated frequently depending on the context, e.g., to consider different requesters or markets.}},
  author       = {{Platenius, Marie Christin and Arifulina, Svetlana and Schäfer, Wilhelm}},
  booktitle    = {{Tagungsband Software Engineering}},
  pages        = {{81--82}},
  title        = {{{MatchBox: A Framework for Dynamic Configuration of Service Matching Processes (Extended Abstract)}}},
  year         = {{2017}},
}

@phdthesis{89,
  abstract     = {{The vision of OTF Computing is to have the software needs of end users in the future covered by an automatic composition of existing software services. Here we focus on natural language software requirements that end users formulate and submit to OTF providers as requirement specifications. These requirements serve as the sole foundation for the composition of software; but they can be inaccurate and incomplete. Up to now, software developers have identified and corrected these deficits by using a bidirectional consolidation process. However, this type of quality assurance is no longer included in OTF Computing - the classic consolidation process is dropped. This is where this work picks up, dealing with the inaccuracies of freely formulated software design requirements. To do this, we developed the CORDULA (Compensation of Requirements Descriptions Using Linguistic Analysis) system that recognizes and compensates for language deficiencies (e.g., ambiguity, vagueness and incompleteness) in requirements written by inexperienced end users. CORDULA supports the search for suitable software services that can be combined in a composition by transferring requirement specifications into canonical core functionalities. This dissertation provides the first-ever method for holistically recording and improving language deficiencies in user-generated requirement specifications by dealing with ambiguity, incompleteness and vagueness in parallel and in sequence.}},
  author       = {{Bäumer, Frederik Simon}},
  school       = {{Universität Paderborn}},
  title        = {{{Indikatorbasierte Erkennung und Kompensation von ungenauen und unvollständig beschriebenen Softwareanforderungen}}},
  doi          = {{10.17619/UNIPB/1-157}},
  year         = {{2017}},
}

@inproceedings{97,
  author       = {{van Rooijen, Lorijn and Bäumer, Frederik Simon and Platenius, Marie Christin and Geierhos, Michaela and Hamann, Heiko and Engels, Gregor}},
  title        = {{{From User Demand to Software Service: Using Machine Learning to Automate the Requirements Specification Process}}},
  booktitle    = {{2017 IEEE 25th International Requirements Engineering Conference Workshops (REW)}},
  publisher    = {{IEEE}},
  location     = {{Lisbon, Portugal}},
  pages        = {{379--385}},
  isbn         = {{978-1-5386-3489-9}},
  doi          = {{10.1109/REW.2017.26}},
  keywords     = {{Software, Unified modeling language, Requirements engineering, Ontologies, Search problems, Natural languages}},
  abstract     = {{Bridging the gap between informal, imprecise, and vague user requirements descriptions and precise formalized specifications is the main task of requirements engineering. Techniques such as interviews or story telling are used when requirements engineers try to identify a user's needs. The requirements specification process is typically done in a dialogue between users, domain experts, and requirements engineers. In our research, we aim at automating the specification of requirements. The idea is to distinguish between untrained users and trained users, and to exploit domain knowledge learned from previous runs of our system. We let untrained users provide unstructured natural language descriptions, while we allow trained users to provide examples of behavioral descriptions. In both cases, our goal is to synthesize formal requirements models similar to statecharts. From requirements specification processes with trained users, behavioral ontologies are learned which are later used to support the requirements specification process for untrained users. Our research method is original in combining natural language processing and search-based techniques for the synthesis of requirements specifications. Our work is embedded in a larger project that aims at automating the whole software development and deployment process in envisioned future software service markets.}},
  year         = {{2017}},
}

@inproceedings{98,
  abstract     = {{Today, modern IT-systems are often an interplay of third-party web services. Developers in their role as requesters integrate existing services of different providers into new IT-systems. Providers use frameworks like Open API to create syntactic service specifications from which requesters generate code to integrate services. Proper service discovery is crucial to identify usable services in the growing plethora of third-party services. Most advanced service discovery approaches rely on semantic specifications, e.g., OWL-S. While semantic specification is crucial for a precise discovery, syntactical specification is needed for service invocation. To close the gap between semantic and syntactic specifications, service grounding establishes links between the semantic and syntactic specifications. However, for a large number of web services still no semantic specification or grounding exists. In this paper, we present an approach that semi-automates the semantic specification of web services for service providers and additionally helps service requesters to leverage semantic web services. Our approach enables a higher degree of automation than other approaches. This includes the creation of semantic specifications and service groundings for service providers as well as the integration of services for requesters by using our code generator. As proof-of-concept, we provide a case study, where we derive a sophisticated semantic OWL-S specification from a syntactic Open API specification.}},
  author       = {{Schwichtenberg, Simon and Gerth, Christian and Engels, Gregor}},
  booktitle    = {{Proceedings of the 24th IEEE International Conference on Web Services (ICWS)}},
  pages        = {{484--491}},
  title        = {{{From Open API to Semantic Specifications and Code Adapters}}},
  year         = {{2017}},
}

@misc{106,
  author       = {{Krammer, Isabel}},
  title        = {{{Denn wir wissen, was gemeint ist: Erweiterung bestehender Lösungen zur lexikalischen Disambiguierung durch einen kontextsensitiven Whitelist-Ansatz}}},
  publisher    = {{Universität München}},
  year         = {{2017}},
}

