@inbook{45882, author = {{Bäumer, Frederik Simon and Chen, Wei-Fan and Geierhos, Michaela and Kersting, Joschka and Wachsmuth, Henning}}, booktitle = {{On-The-Fly Computing -- Individualized IT-services in dynamic markets}}, editor = {{Haake, Claus-Jochen and Meyer auf der Heide, Friedhelm and Platzner, Marco and Wachsmuth, Henning and Wehrheim, Heike}}, pages = {{65--84}}, publisher = {{Heinz Nixdorf Institut, Universität Paderborn}}, title = {{{Dialogue-based Requirement Compensation and Style-adjusted Data-to-text Generation}}}, doi = {{10.5281/zenodo.8068456}}, volume = {{412}}, year = {{2023}}, } @inbook{46205, abstract = {{We present a concept for quantifying evaluative phrases to later compare rating texts numerically instead of just relying on stars or grades. We achieve this by combining deep learning models in an aspect-based sentiment analysis pipeline along with sentiment weighting, polarity, and correlation analyses that combine deep learning results with metadata. The results provide new insights for the medical field. Our application domain, physician reviews, shows that there are millions of review texts on the Internet that cannot yet be comprehensively analyzed because previous studies have focused on explicit aspects from other domains (e.g., products). We identify, extract, and classify implicit and explicit aspect phrases equally from German-language review texts. To do so, we annotated aspect phrases representing reviews on numerous aspects of a physician, medical practice, or practice staff. We apply the best performing transformer model, XLM-RoBERTa, to a large physician review dataset and correlate the results with existing metadata. As a result, we can show different correlations between the sentiment polarity of certain aspect classes (e.g., friendliness, practice equipment) and physicians’ professions (e.g., surgeon, ophthalmologist). 
As a result, we have individual numerical scores that contain a variety of information based on deep learning algorithms that extract textual (evaluative) information and metadata from the Web.}}, author = {{Kersting, Joschka and Geierhos, Michaela}}, booktitle = {{Data Management Technologies and Applications}}, editor = {{Cuzzocrea, Alfredo and Gusikhin, Oleg and Hammoudi, Slimane and Quix, Christoph}}, isbn = {{9783031378898}}, issn = {{1865-0929}}, pages = {{45--65}}, publisher = {{Springer Nature Switzerland}}, title = {{{Towards Comparable Ratings: Quantifying Evaluative Phrases in Physician Reviews}}}, doi = {{10.1007/978-3-031-37890-4_3}}, volume = {{1860}}, year = {{2023}}, } @book{45863, abstract = {{In the proposal for our CRC in 2011, we formulated a vision of markets for IT services that describes an approach to the provision of such services that was novel at that time and, to a large extent, remains so today: „Our vision of on-the-fly computing is that of IT services individually and automatically configured and brought to execution from flexibly combinable services traded on markets. At the same time, we aim at organizing markets whose participants maintain a lively market of services through appropriate entrepreneurial actions.“ Over the last 12 years, we have developed methods and techniques to address problems critical to the convenient, efficient, and secure use of on-the-fly computing. Among other things, we have made the description of services more convenient by allowing natural language input, increased the quality of configured services through (natural language) interaction and more efficient configuration processes and analysis procedures, made the quality of (the products of) providers in the marketplace transparent through reputation systems, and increased the resource efficiency of execution through reconfigurable heterogeneous computing nodes and an integrated treatment of service description and configuration. 
We have also developed network infrastructures that have a high degree of adaptivity, scalability, efficiency, and reliability, and provide cryptographic guarantees of anonymity and security for market participants and their products and services. To demonstrate the pervasiveness of the OTF computing approach, we have implemented a proof-of-concept for OTF computing that can run typical scenarios of an OTF market. We illustrated the approach using a cutting-edge application scenario – automated machine learning (AutoML). Finally, we have been pushing our work for the perpetuation of On-The-Fly Computing beyond the SFB and sharing the expertise gained in the SFB in events with industry partners as well as transfer projects. This work required a broad spectrum of expertise. Computer scientists and economists with research interests such as computer networks and distributed algorithms, security and cryptography, software engineering and verification, configuration and machine learning, computer engineering and HPC, microeconomics and game theory, business informatics and management have successfully collaborated here.}}, author = {{Haake, Claus-Jochen and Meyer auf der Heide, Friedhelm and Platzner, Marco and Wachsmuth, Henning and Wehrheim, Heike}}, pages = {{247}}, publisher = {{Heinz Nixdorf Institut, Universität Paderborn}}, title = {{{On-The-Fly Computing -- Individualized IT-services in dynamic markets}}}, doi = {{10.17619/UNIPB/1-1797}}, volume = {{412}}, year = {{2023}}, } @inproceedings{33274, author = {{Chen, Wei-Fan and Chen, Mei-Hua and Mudgal, Garima and Wachsmuth, Henning}}, booktitle = {{Proceedings of the 9th Workshop on Argument Mining (ArgMining 2022)}}, pages = {{51--61}}, title = {{{Analyzing Culture-Specific Argument Structures in Learner Essays}}}, year = {{2022}}, } @inbook{32179, abstract = {{This work addresses the automatic resolution of software requirements. 
In the vision of On-The-Fly Computing, software services should be composed on demand, based solely on natural language input from human users. To enable this, we build a chatbot solution that works with human-in-the-loop support to receive, analyze, correct, and complete their software requirements. The chatbot is equipped with a natural language processing pipeline and a large knowledge base, as well as sophisticated dialogue management skills to enhance the user experience. Previous solutions have focused on analyzing software requirements to point out errors such as vagueness, ambiguity, or incompleteness. Our work shows how apps can collaborate with users to efficiently produce correct requirements. We developed and compared three different chatbot apps that can work with built-in knowledge. We rely on ChatterBot, DialoGPT and Rasa for this purpose. While DialoGPT provides its own knowledge base, Rasa is the best system to combine the text mining and knowledge solutions at our disposal. The evaluation shows that users accept 73% of the suggested answers from Rasa, while they accept only 63% from DialoGPT or even 36% from ChatterBot.}}, author = {{Kersting, Joschka and Ahmed, Mobeen and Geierhos, Michaela}}, booktitle = {{HCI International 2022 Posters}}, editor = {{Stephanidis, Constantine and Antona, Margherita and Ntoa, Stavroula}}, isbn = {{9783031064166}}, issn = {{1865-0929}}, keywords = {{On-The-Fly Computing, Chatbot, Knowledge Base}}, location = {{Virtual}}, pages = {{419--426}}, publisher = {{Springer International Publishing}}, title = {{{Chatbot-Enhanced Requirements Resolution for Automated Service Compositions}}}, doi = {{10.1007/978-3-031-06417-3_56}}, volume = {{1580}}, year = {{2022}}, } @inproceedings{31054, abstract = {{This paper aims at discussing past limitations set in sentiment analysis research regarding explicit and implicit mentions of opinions. 
Previous studies have regularly neglected this question in favor of methodical research on standard-datasets. Furthermore, they were limited to linguistically less-diverse domains, such as commercial product reviews. We face this issue by annotating a German-language physician review dataset that contains numerous implicit, long, and complex statements that indicate aspect ratings, such as the physician’s friendliness. We discuss the nature of implicit statements and present various samples to illustrate the challenge described.}}, author = {{Kersting, Joschka and Bäumer, Frederik Simon}}, booktitle = {{Proceedings of the Fourteenth International Conference on Pervasive Patterns and Applications (PATTERNS 2022): Special Track AI-DRSWA: Maturing Artificial Intelligence - Data Science for Real-World Applications}}, editor = {{Kersting, Joschka}}, keywords = {{Sentiment analysis, Natural language processing, Aspect phrase extraction}}, location = {{Barcelona, Spain}}, pages = {{5--9}}, publisher = {{IARIA}}, title = {{{Implicit Statements in Healthcare Reviews: A Challenge for Sentiment Analysis}}}, year = {{2022}}, } @inproceedings{31068, author = {{Chen, Mei-Hua and Mudgal, Garima and Chen, Wei-Fan and Wachsmuth, Henning}}, booktitle = {{EUROCALL}}, title = {{{Investigating the argumentation structures of EFL learners from diverse language backgrounds}}}, year = {{2022}}, } @inproceedings{26049, abstract = {{Content is the new oil. Users consume billions of terabytes a day while surfing on news sites or blogs, posting on social media sites, and sending chat messages around the globe. While content is heterogeneous, the dominant form of web content is text. There are situations where more diversity needs to be introduced into text content, for example, to reuse it on websites or to allow a chatbot to base its models on the information conveyed rather than of the language used. 
In order to achieve this, paraphrasing techniques have been developed: One example is Text spinning, a technique that automatically paraphrases text while leaving the intent intact. This makes it easier to reuse content, or to make the language generated by the bot more human. One method for modifying texts is a combination of translation and back-translation. This paper presents NATTS, a naive approach that uses transformer-based translation models to create diversified text, combining translation steps in one model. An advantage of this approach is that it can be fine-tuned and handle technical language.}}, author = {{Bäumer, Frederik Simon and Kersting, Joschka and Denisov, Sergej and Geierhos, Michaela}}, booktitle = {{PROCEEDINGS OF THE INTERNATIONAL CONFERENCES ON WWW/INTERNET 2021 AND APPLIED COMPUTING 2021}}, keywords = {{Software Requirements, Natural Language Processing, Transfer Learning, On-The-Fly Computing}}, location = {{Lisbon, Portugal}}, pages = {{221--225}}, publisher = {{IADIS}}, title = {{{IN OTHER WORDS: A NAIVE APPROACH TO TEXT SPINNING}}}, year = {{2021}}, } @inbook{17905, abstract = {{This chapter concentrates on aspect-based sentiment analysis, a form of opinion mining where algorithms detect sentiments expressed about features of products, services, etc. We especially focus on novel approaches for aspect phrase extraction and classification trained on feature-rich datasets. Here, we present two new datasets, which we gathered from the linguistically rich domain of physician reviews, as other investigations have mainly concentrated on commercial reviews and social media reviews so far. To give readers a better understanding of the underlying datasets, we describe the annotation process and inter-annotator agreement in detail. In our research, we automatically assess implicit mentions or indications of specific aspects. 
To do this, we propose and utilize neural network models that perform the here-defined aspect phrase extraction and classification task, achieving F1-score values of about 80% and accuracy values of more than 90%. As we apply our models to a comparatively complex domain, we obtain promising results. }}, author = {{Kersting, Joschka and Geierhos, Michaela}}, booktitle = {{Natural Language Processing in Artificial Intelligence -- NLPinAI 2020}}, editor = {{Loukanova, Roussanka}}, pages = {{163--189}}, publisher = {{Springer}}, title = {{{Towards Aspect Extraction and Classification for Opinion Mining with Deep Sequence Networks}}}, doi = {{10.1007/978-3-030-63787-3_6}}, volume = {{939}}, year = {{2021}}, } @inproceedings{22051, author = {{Kersting, Joschka and Geierhos, Michaela}}, booktitle = {{Proceedings of the 10th International Conference on Data Science, Technology and Applications (DATA 2021)}}, location = {{Online}}, pages = {{275--284}}, publisher = {{SCITEPRESS}}, title = {{{Well-being in Plastic Surgery: Deep Learning Reveals Patients' Evaluations}}}, year = {{2021}}, } @inbook{22052, abstract = {{In this study, we describe a text processing pipeline that transforms user-generated text into structured data. To do this, we train neural and transformer-based models for aspect-based sentiment analysis. As most research deals with explicit aspects from product or service data, we extract and classify implicit and explicit aspect phrases from German-language physician review texts. Patients often rate on the basis of perceived friendliness or competence. The vocabulary is difficult, the topic sensitive, and the data user-generated. The aspect phrases come with various wordings using insertions and are not noun-based, which makes the presented case equally relevant and reality-based. To find complex, indirect aspect phrases, up-to-date deep learning approaches must be combined with supervised training data. 
We describe three aspect phrase datasets, one of them new, as well as a newly annotated aspect polarity dataset. Alongside this, we build an algorithm to rate the aspect phrase importance. All in all, we train eight transformers on the new raw data domain, compare 54 neural aspect extraction models and, based on this, create eight aspect polarity models for our pipeline. These models are evaluated by using Precision, Recall, and F-Score measures. Finally, we evaluate our aspect phrase importance measure algorithm.}}, author = {{Kersting, Joschka and Geierhos, Michaela}}, booktitle = {{Natural Language Processing and Information Systems}}, editor = {{Kapetanios, Epaminondas and Horacek, Helmut and Métais, Elisabeth and Meziane, Farid}}, location = {{Saarbrücken, Germany}}, pages = {{231--242}}, publisher = {{Springer}}, title = {{{Human Language Comprehension in Aspect Phrase Extraction with Importance Weighting}}}, volume = {{12801}}, year = {{2021}}, } @inproceedings{21178, abstract = {{When engaging in argumentative discourse, skilled human debaters tailor claims to the beliefs of the audience, to construct effective arguments. Recently, the field of computational argumentation witnessed extensive effort to address the automatic generation of arguments. However, existing approaches do not perform any audience-specific adaptation. In this work, we aim to bridge this gap by studying the task of belief-based claim generation: Given a controversial topic and a set of beliefs, generate an argumentative claim tailored to the beliefs. To tackle this task, we model the people's prior beliefs through their stances on controversial topics and extend state-of-the-art text generation models to generate claims conditioned on the beliefs. Our automatic evaluation confirms the ability of our approach to adapt claims to a set of given beliefs. 
In a manual study, we additionally evaluate the generated claims in terms of informativeness and their likelihood to be uttered by someone with a respective belief. Our results reveal the limitations of modeling users' beliefs based on their stances, but demonstrate the potential of encoding beliefs into argumentative texts, laying the ground for future exploration of audience reach.}}, author = {{Alshomary, Milad and Chen, Wei-Fan and Gurcke, Timon and Wachsmuth, Henning}}, booktitle = {{Proceedings of the 16th Conference of the European Chapter of the Association for Computational Linguistics: Main Volume}}, location = {{Online}}, pages = {{224--233}}, publisher = {{Association for Computational Linguistics}}, title = {{{Belief-based Generation of Argumentative Claims}}}, year = {{2021}}, } @inproceedings{23709, author = {{Chen, Wei-Fan and Al Khatib, Khalid and Stein, Benno and Wachsmuth, Henning}}, booktitle = {{Findings of the Association for Computational Linguistics: EMNLP 2021}}, pages = {{2683--2693}}, title = {{{Controlled Neural Sentence-Level Reframing of News Articles}}}, year = {{2021}}, } @inproceedings{22229, author = {{Alshomary, Milad and Syed, Shahbaz and Potthast, Martin and Wachsmuth, Henning}}, booktitle = {{Proceedings of the Joint Conference of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing (ACL-IJCNLP 2021)}}, location = {{Online}}, pages = {{1816--1827}}, publisher = {{Association for Computational Linguistics}}, title = {{{Argument Undermining: Counter-Argument Generation by Attacking Weak Premises}}}, doi = {{10.18653/v1/2021.findings-acl.159}}, year = {{2021}}, } @inbook{17347, abstract = {{Peer-to-Peer news portals allow Internet users to write news articles and make them available online to interested readers. 
Despite the fact that authors are free in their choice of topics, there are a number of quality characteristics that an article must meet before it is published. In addition to meaningful titles, comprehensibly written texts and meaningful images, relevant tags are an important criterion for the quality of such news. In this case study, we discuss the challenges and common mistakes that Peer-to-Peer reporters face when tagging news and how incorrect information can be corrected through the orchestration of existing Natural Language Processing services. Lastly, we use this illustrative example to give insight into the challenges of dealing with bottom-up taxonomies.}}, author = {{Bäumer, Frederik Simon and Kersting, Joschka and Buff, Bianca and Geierhos, Michaela}}, booktitle = {{Information and Software Technologies}}, editor = {{Lopata, Audrius and Butkienė, Rita and Gudonienė, Daina and Sukackė, Vilma}}, location = {{Kaunas, Lithuania}}, pages = {{368--382}}, publisher = {{Springer}}, title = {{{Tag Me If You Can: Insights into the Challenges of Supporting Unrestricted P2P News Tagging}}}, doi = {{10.1007/978-3-030-59506-7_30}}, volume = {{1283}}, year = {{2020}}, } @inproceedings{18686, author = {{Kersting, Joschka and Bäumer, Frederik Simon}}, booktitle = {{PROCEEDINGS OF THE INTERNATIONAL CONFERENCE ON APPLIED COMPUTING 2020}}, keywords = {{Software Requirements, Natural Language Processing, Transfer Learning, On-The-Fly Computing}}, location = {{Lisbon, Portugal}}, pages = {{119--123}}, publisher = {{IADIS}}, title = {{{SEMANTIC TAGGING OF REQUIREMENT DESCRIPTIONS: A TRANSFORMER-BASED APPROACH}}}, year = {{2020}}, } @inproceedings{15580, abstract = {{This paper deals with aspect phrase extraction and classification in sentiment analysis. We summarize current approaches and datasets from the domain of aspect-based sentiment analysis. This domain detects sentiments expressed for individual aspects in unstructured text data. 
So far, mainly commercial user reviews for products or services such as restaurants were investigated. We here present our dataset consisting of German physician reviews, a sensitive and linguistically complex field. Furthermore, we describe the annotation process of a dataset for supervised learning with neural networks. Moreover, we introduce our model for extracting and classifying aspect phrases in one step, which obtains an F1-score of 80%. By applying it to a more complex domain, our approach and results outperform previous approaches.}}, author = {{Kersting, Joschka and Geierhos, Michaela}}, booktitle = {{Proceedings of the 12th International Conference on Agents and Artificial Intelligence (ICAART 2020) -- Special Session on Natural Language Processing in Artificial Intelligence (NLPinAI 2020)}}, keywords = {{Deep Learning, Natural Language Processing, Aspect-based Sentiment Analysis}}, location = {{Valletta, Malta}}, pages = {{391--400}}, publisher = {{SCITEPRESS}}, title = {{{Aspect Phrase Extraction in Sentiment Analysis with Deep Learning}}}, year = {{2020}}, } @inproceedings{15582, abstract = {{When it comes to increased digitization in the health care domain, privacy is a relevant topic nowadays. This relates to patient data, electronic health records or physician reviews published online, for instance. There exist different approaches to the protection of individuals’ privacy, which focus on the anonymization and masking of personal information subsequent to their mining. In the medical domain in particular, measures to protect the privacy of patients are of high importance due to the amount of sensitive data that is involved (e.g. age, gender, illnesses, medication). While privacy breaches in structured data can be detected more easily, disclosure in written texts is more difficult to find automatically due to the unstructured nature of natural language. Therefore, we take a detailed look at existing research on areas related to privacy protection. 
Likewise, we review approaches to the automatic detection of privacy disclosure in different types of medical data. We provide a survey of several studies concerned with privacy breaches in the medical domain with a focus on Physician Review Websites (PRWs). Finally, we briefly develop implications and directions for further research.}}, author = {{Buff, Bianca and Kersting, Joschka and Geierhos, Michaela}}, booktitle = {{Proceedings of the 9th International Conference on Pattern Recognition Applications and Methods (ICPRAM 2020)}}, keywords = {{Identity Disclosure, Privacy Protection, Physician Review Website, De-Anonymization, Medical Domain}}, location = {{Valletta, Malta}}, pages = {{630--637}}, publisher = {{SCITEPRESS}}, title = {{{Detection of Privacy Disclosure in the Medical Domain: A Survey}}}, year = {{2020}}, } @inproceedings{15635, author = {{Kersting, Joschka and Geierhos, Michaela}}, booktitle = {{Proceedings of the 33rd International Florida Artificial Intelligence Research Symposium (FLAIRS) Conference}}, location = {{North Miami Beach, FL, USA}}, pages = {{282--285}}, publisher = {{AAAI}}, title = {{{Neural Learning for Aspect Phrase Extraction and Classification in Sentiment Analysis}}}, year = {{2020}}, } @article{15025, abstract = {{In software engineering, the imprecise requirements of a user are transformed to a formal requirements specification during the requirements elicitation process. This process is usually guided by requirements engineers interviewing the user. We want to partially automate this first step of the software engineering process in order to enable users to specify a desired software system on their own. With our approach, users are only asked to provide exemplary behavioral descriptions. The problem of synthesizing a requirements specification from examples can partially be reduced to the problem of grammatical inference, to which we apply an active coevolutionary learning approach. 
However, this approach would usually require many feedback queries to be sent to the user. In this work, we extend and generalize our active learning approach to receive knowledge from multiple oracles, also known as proactive learning. The ‘user oracle’ represents input received from the user and the ‘knowledge oracle’ represents available, formalized domain knowledge. We call our two-oracle approach the ‘first apply knowledge then query’ (FAKT/Q) algorithm. We compare FAKT/Q to the active learning approach and provide an extensive benchmark evaluation. As result we find that the number of required user queries is reduced and the inference process is sped up significantly. Finally, with so-called On-The-Fly Markets, we present a motivation and an application of our approach where such knowledge is available.}}, author = {{Wever, Marcel Dominik and van Rooijen, Lorijn and Hamann, Heiko}}, journal = {{Evolutionary Computation}}, number = {{2}}, pages = {{165--193}}, publisher = {{MIT Press Journals}}, title = {{{Multi-Oracle Coevolutionary Learning of Requirements Specifications from Examples in On-The-Fly Markets}}}, doi = {{10.1162/evco_a_00266}}, volume = {{28}}, year = {{2020}}, } @inproceedings{15256, abstract = {{This paper deals with online customer reviews of local multi-service providers. While many studies investigate product reviews and online labour markets with service providers delivering intangible products “over the wire”, we focus on websites where providers offer multiple distinct services that can be booked, paid and reviewed online but are performed locally offline. This type of service providers has so far been neglected in the literature. This paper analyses reviews and applies sentiment analysis. It aims to gain new insights into local multi-service providers’ performance. There is a broad literature range presented with regard to the topics addressed. 
The results show, among other things, that providers with good ratings continue to perform well over time. We find that many positive reviews seem to encourage sales. On average, quantitative star ratings and qualitative ratings in the form of review texts match. Further results are also achieved in this study.}}, author = {{Kersting, Joschka and Geierhos, Michaela}}, booktitle = {{Proceedings of the 9th International Conference on Pattern Recognition Applications and Methods}}, keywords = {{Customer Reviews, Sentiment Analysis, Online Labour Markets}}, location = {{Valletta, Malta}}, pages = {{263--272}}, publisher = {{SCITEPRESS}}, title = {{{What Reviews in Local Online Labour Markets Reveal about the Performance of Multi-Service Providers}}}, year = {{2020}}, } @article{13770, author = {{Karl, Holger and Kundisch, Dennis and Meyer auf der Heide, Friedhelm and Wehrheim, Heike}}, journal = {{Business \& Information Systems Engineering}}, number = {{6}}, pages = {{467--481}}, publisher = {{Springer}}, title = {{{A Case for a New IT Ecosystem: On-The-Fly Computing}}}, doi = {{10.1007/s12599-019-00627-x}}, volume = {{62}}, year = {{2020}}, } @inproceedings{3776, author = {{Chen, Wei-Fan and Al-Khatib, Khalid and Wachsmuth, Henning and Stein, Benno}}, booktitle = {{Proceedings of the Fourth Workshop on Natural Language Processing and Computational Social Science}}, pages = {{149--154}}, title = {{{Analyzing Political Bias and Unfairness in News Articles at Different Levels of Granularity}}}, year = {{2020}}, } @inproceedings{20137, author = {{Syed, Shahbaz and Chen, Wei-Fan and Hagen, Matthias and Stein, Benno and Wachsmuth, Henning and Potthast, Martin}}, booktitle = {{Proceedings of the 13th International Conference on Natural Language Generation (INLG 2020)}}, pages = {{237--241}}, title = {{{Task Proposal: Abstractive Snippet Generation for Web Pages}}}, year = {{2020}}, } @inproceedings{3818, author = {{Chen, Wei-Fan and Al-Khatib, Khalid and Stein, Benno and 
Wachsmuth, Henning}}, booktitle = {{Findings of the Association for Computational Linguistics: EMNLP 2020}}, pages = {{4290--4300}}, title = {{{Detecting Media Bias in News Articles using Gaussian Bias Distributions}}}, year = {{2020}}, } @inproceedings{15826, author = {{Chen, Wei-Fan and Syed, Shahbaz and Stein, Benno and Hagen, Matthias and Potthast, Martin}}, booktitle = {{Proceedings of the Web Conference 2020}}, pages = {{1309--1319}}, title = {{{Abstractive Snippet Generation}}}, year = {{2020}}, } @inproceedings{16868, author = {{Alshomary, Milad and Syed, Shahbaz and Potthast, Martin and Wachsmuth, Henning}}, booktitle = {{Proceedings of 58th Annual Meeting of the Association for Computational Linguistics (ACL 2020)}}, location = {{Seattle, USA}}, pages = {{4334--4345}}, publisher = {{Association for Computational Linguistics}}, title = {{{Target Inference in Argument Conclusion Generation}}}, year = {{2020}}, } @misc{8312, author = {{Bäumer, Frederik Simon and Geierhos, Michaela}}, booktitle = {{encyclopedia.pub}}, keywords = {{OTF Computing, Natural Language Processing, Requirements Engineering}}, publisher = {{MDPI}}, title = {{{Requirements Engineering in OTF-Computing}}}, year = {{2019}}, } @article{8424, abstract = {{The vision of On-the-Fly (OTF) Computing is to compose and provide software services ad hoc, based on requirement descriptions in natural language. Since non-technical users write their software requirements themselves and in unrestricted natural language, deficits occur such as inaccuracy and incompleteness. These deficits are usually met by natural language processing methods, which have to face special challenges in OTF Computing because maximum automation is the goal. In this paper, we present current automatic approaches for solving inaccuracies and incompletenesses in natural language requirement descriptions and elaborate open challenges. 
In particular, we will discuss the necessity of domain-specific resources and show why, despite far-reaching automation, an intelligent and guided integration of end users into the compensation process is required. In this context, we present our idea of a chat bot that integrates users into the compensation process depending on the given circumstances. }}, author = {{Bäumer, Frederik Simon and Kersting, Joschka and Geierhos, Michaela}}, issn = {{2073-431X}}, journal = {{Computers}}, keywords = {{Inaccuracy Detection, Natural Language Software Requirements, Chat Bot}}, location = {{Vilnius, Lithuania}}, number = {{1}}, publisher = {{MDPI AG, Basel, Switzerland}}, title = {{{Natural Language Processing in OTF Computing: Challenges and the Need for Interactive Approaches}}}, doi = {{10.3390/computers8010022}}, volume = {{8}}, year = {{2019}}, } @inproceedings{13259, author = {{Chen, Wei-Fan and Al-Khatib, Khalid and Hagen, Matthias and Wachsmuth, Henning and Stein, Benno}}, booktitle = {{Proceedings of the Second Workshop on Natural Language Processing for Internet Freedom}}, pages = {{76--82}}, title = {{{Unraveling the Search Space of Abusive Language in Wikipedia with Dynamic Lexicon Acquisition}}}, year = {{2019}}, } @inbook{2322, abstract = {{The vision of On-The-Fly Computing is an automatic composition of existing software services. Based on natural language software descriptions, end users will receive compositions tailored to their needs. For this reason, the quality of the initial software service description strongly determines whether a software composition really meets the expectations of end users. 
In this paper, we expose open NLP challenges needed to be faced for service composition in On-The-Fly Computing.}}, author = {{Bäumer, Frederik Simon and Geierhos, Michaela}}, booktitle = {{Proceedings of the 23rd International Conference on Natural Language and Information Systems}}, editor = {{Silberztein, Max and Atigui, Faten and Kornyshova, Elena and Métais, Elisabeth and Meziane, Farid }}, isbn = {{978-3-319-91946-1}}, keywords = {{Requirements Extraction, Temporal Reordering of Software Functions, Inaccuracy Compensation}}, location = {{Paris, France}}, pages = {{509--513}}, publisher = {{Springer}}, title = {{{How to Deal with Inaccurate Service Descriptions in On-The-Fly Computing: Open Challenges}}}, doi = {{10.1007/978-3-319-91947-8_53}}, volume = {{10859}}, year = {{2018}}, } @article{2331, abstract = {{A user generally writes software requirements in ambiguous and incomplete form by using natural language; therefore, a software developer may have difficulty in clearly understanding what the meanings are. To solve this problem with automation, we propose a classifier for semantic annotation with manually pre-defined semantic categories. To improve our classifier, we carefully designed syntactic features extracted by constituency and dependency parsers. 
Even with a small dataset and a large number of classes, our proposed classifier records an accuracy of 0.75, which outperforms the previous model, REaCT.}}, author = {{Kim, Yeongsu and Lee, Seungwoo and Dollmann, Markus and Geierhos, Michaela}}, issn = {{2207-6360}}, journal = {{International Journal of Advanced Science and Technology}}, keywords = {{Software Engineering, Natural Language Processing, Semantic Annotation, Machine Learning, Feature Engineering, Syntactic Structure}}, pages = {{123--136}}, publisher = {{SERSC Australia}}, title = {{{Improving Classifiers for Semantic Annotation of Software Requirements with Elaborate Syntactic Structure}}}, doi = {{10.14257/ijast.2018.112.12}}, volume = {{112}}, year = {{2018}}, } @inproceedings{4339, abstract = {{On-The-Fly Computing is the vision of covering software needs of end users by fully-automatic compositions of existing software services. End users will receive so-called service compositions tailored to their very individual needs, based on natural language software descriptions. This everyday language may contain inaccuracies and incompleteness, which are well-known challenges in requirements engineering. In addition to existing approaches that try to automatically identify and correct these deficits, there are also new trends to involve users more in the elaboration and refinement process. In this paper, we present the relevant state of the art in the field of automated detection and compensation of multiple inaccuracies in natural language service descriptions and name open challenges needed to be tackled in NL-based software service composition. 
}}, author = {{Bäumer, Frederik Simon and Geierhos, Michaela}}, booktitle = {{Proceedings of the 24th International Conference on Information and Software Technologies (ICIST 2018)}}, editor = {{Damaševičius, Robertas and Vasiljevienė, Giedrė}}, isbn = {{9783319999715}}, issn = {{1865-0929}}, keywords = {{Inaccuracy detection, Natural language software requirements}}, location = {{Vilnius, Lithuania}}, pages = {{559--570}}, publisher = {{Springer}}, title = {{{NLP in OTF Computing: Current Approaches and Open Challenges}}}, doi = {{10.1007/978-3-319-99972-2_46}}, volume = {{920}}, year = {{2018}}, } @inproceedings{44, abstract = {{Natural language software requirements descriptions enable end users to formulate their wishes and expectations for a future software product without much prior knowledge in requirements engineering. However, these descriptions are susceptible to linguistic inaccuracies such as ambiguities and incompleteness that can harm the development process. There is a number of software solutions that can detect deficits in requirements descriptions and partially solve them, but they are often hard to use and not suitable for end users. For this reason, we develop a software system that helps end-users to create unambiguous and complete requirements descriptions by combining existing expert tools and controlling them using automatic compensation strategies. In order to recognize the necessity of individual compensation methods in the descriptions, we have developed linguistic indicators, which we present in this paper. 
Based on these indicators, the whole text analysis pipeline is ad-hoc configured and thus adapted to the individual circumstances of a requirements description.}}, author = {{Bäumer, Frederik Simon and Geierhos, Michaela}}, booktitle = {{Proceedings of the 51st Hawaii International Conference on System Sciences}}, isbn = {{978-0-9981331-1-9}}, keywords = {{Software Product Lines: Engineering, Services, and Management, Ambiguities, Incompleteness, Natural Language Processing, Software Requirements}}, location = {{Big Island, Waikoloa Village}}, pages = {{5746--5755}}, title = {{{Flexible Ambiguity Resolution and Incompleteness Detection in Requirements Descriptions via an Indicator-based Configuration of Text Analysis Pipelines}}}, doi = {{10125/50609}}, year = {{2018}}, } @inbook{1104, abstract = {{Präzision ist kein Zufall. Sie wird vom Menschen herbeigeführt, indem Übereinstimmung mit einem Standard oder einem akzeptierten Wert angestrebt wird oder die Reproduzierbarkeit von Experimenten möglichst hoch sein muss. Was aber tun, wenn Präzision mangels verfügbarer Informationen nicht hergestellt werden kann? Wie gehen Wissenschaft und Kunst dann mit dieser fehlenden Eindeutigkeit um? Die Autorinnen und Autoren dieses Sammelbandes beleuchten aus der Perspektive ihrer jeweiligen Fachdisziplin die Chancen bei der Berücksichtigung von Unschärfe(n) in ihrer Forschung und Kunst. Denn Unschärfe ist Realität. }}, author = {{Geierhos, Michaela}}, booktitle = {{Unschärfe - Der Umgang mit fehlender Eindeutigkeit}}, editor = {{Freitag, Steffen and Geierhos, Michaela and Asmani, Rozbeh and Haug, Judith I.}}, isbn = {{978-3-506-78896-2}}, pages = {{111--128}}, publisher = {{Ferdinand Schöningh}}, title = {{{Unschärfe bei der Interpretation natürlichsprachlicher Anforderungsbeschreibungen}}}, year = {{2018}}, } @inproceedings{1181, abstract = {{The main idea in On-The-Fly Computing is to automatically compose existing software services according to the wishes of end-users. 
However, since user requirements are often ambiguous, vague and incomplete, the selection and composition of suitable software services is a challenging task. In this paper, we present our current approach to improve requirement descriptions before they are used for software composition. This procedure is fully automated, but also has limitations, for example, if necessary information is missing. In addition, and in response to the limitations, we provide insights into our above-mentioned current work that combines the existing optimization approach with a chatbot solution.}}, author = {{Bäumer, Frederik Simon and Geierhos, Michaela}}, booktitle = {{Joint Proceedings of REFSQ-2018 Workshops, Doctoral Symposium, Live Studies Track, and Poster Track co-located with the 23rd International Conference on Requirements Engineering: Foundation for Software Quality (REFSQ 2018)}}, editor = {{Schmid, Klaus and Spoletini, Paola and Ben Charrada, Eya and Chisik, Yoram and Dalpiaz, Fabiano and Ferrari, Alessio and Forbrig, Peter and Franch, Xavier and Kirikova, Marite and Madhavji, Nazim and Palomares, Cristina and Ralyté, Jolita and Sabetzadeh, Mehrdad and Sawyer, Pete and van der Linden, Dirk and Zamansky, Anna}}, issn = {{1613-0073}}, location = {{Utrecht, The Netherlands}}, publisher = {{CEUR-WS.org}}, title = {{{How to Deal with Inaccurate Service Requirements? Insights in Our Current Approach and New Ideas}}}, volume = {{2075}}, year = {{2018}}, } @inproceedings{1182, abstract = {{Natural language requirement descriptions are often unstructured, contradictory and incomplete and are therefore challenging for automatic processing. Although many of these deficits can be compensated by means of Natural Language Processing, there still remain cases where interaction with end-users is necessary for clarification. 
In this paper, we present our idea of using chatbot technology to establish end-user communication in order to support the automatic compensation of some deficits in natural language requirement descriptions.}}, author = {{Friesen, Edwin and Bäumer, Frederik Simon and Geierhos, Michaela}}, booktitle = {{Joint Proceedings of REFSQ-2018 Workshops, Doctoral Symposium, Live Studies Track, and Poster Track co-located with the 23rd International Conference on Requirements Engineering: Foundation for Software Quality (REFSQ 2018)}}, editor = {{Schmid, Klaus and Spoletini, Paola and Ben Charrada, Eya and Chisik, Yoram and Dalpiaz, Fabiano and Ferrari, Alessio and Forbrig, Peter and Franch, Xavier and Kirikova, Marite and Madhavji, Nazim and Palomares, Cristina and Ralyté, Jolita and Sabetzadeh, Mehrdad and Sawyer, Pete and van der Linden, Dirk and Zamansky, Anna }}, issn = {{1613-0073}}, location = {{Utrecht, The Netherlands}}, publisher = {{CEUR-WS.org}}, title = {{{CORDULA: Software Requirements Extraction Utilizing Chatbot as Communication Interface}}}, volume = {{2075}}, year = {{2018}}, } @inproceedings{1183, abstract = {{As our world grows in complexity, companies and employees alike need, more than ever before, solutions tailored to their exact needs. Since such tools cannot always be purchased off-the-shelf and need to be designed from the ground up, developers rely on software requirements. In this paper, we present our vision of a syntactic rule-based extraction tool for software requirements specification documents. 
In contrast to other methods, our tool will allow stakeholders to express their needs and wishes in unfiltered natural language, which we believe is essential for non-expert users.}}, author = {{Caron, Matthew and Bäumer, Frederik Simon and Geierhos, Michaela}}, booktitle = {{Joint Proceedings of REFSQ-2018 Workshops, Doctoral Symposium, Live Studies Track, and Poster Track co-located with the 23rd International Conference on Requirements Engineering: Foundation for Software Quality (REFSQ 2018)}}, editor = {{Schmid, Klaus and Spoletini, Paola and Ben Charrada, Eya and Chisik, Yoram and Dalpiaz, Fabiano and Ferrari, Alessio and Forbrig, Peter and Franch, Xavier and Kirikova, Marite and Madhavji, Nazim and Palomares, Cristina and Ralyté, Jolita and Sabetzadeh, Mehrdad and Sawyer, Pete and van der Linden, Dirk and Zamansky, Anna}}, issn = {{1613-0073}}, location = {{Utrecht, The Netherlands}}, publisher = {{CEUR-WS.org}}, title = {{{Back to Basics: Extracting Software Requirements with a Syntactic Approach}}}, volume = {{2075}}, year = {{2018}}, } @inproceedings{11710, author = {{Chen, Wei-Fan and Wachsmuth, Henning and Al Khatib, Khalid and Stein, Benno}}, booktitle = {{Proceedings of the 11th International Conference on Natural Language Generation}}, pages = {{79--88}}, publisher = {{Association for Computational Linguistics}}, title = {{{Learning to Flip the Bias of News Headlines}}}, year = {{2018}}, } @inproceedings{14873, author = {{Chen, Wei-Fan and Hagen, Matthias and Stein, Benno and Potthast, Martin}}, booktitle = {{Proceedings of the 41st International ACM SIGIR Conference on Research & Development in Information Retrieval}}, pages = {{1033--1036}}, title = {{{A User Study on Snippet Generation: Text Reuse vs. 
Paraphrases}}}, year = {{2018}}, } @inproceedings{14885, author = {{Potthast, Martin and Chen, Wei-Fan and Hagen, Matthias and Stein, Benno}}, booktitle = {{Proceedings of the Second International Workshop on Recent Trends in News Information Retrieval}}, pages = {{3--5}}, title = {{{A Plan for Ancillary Copyright: Original Snippets.}}}, year = {{2018}}, } @inbook{93, abstract = {{In recent years, there has been a proliferation of technological developments that incorporate processing of human language. Hardware and software can be specialized for designated subject areas, and computational devices are designed for a widening variety of applications. At the same time, new areas and applications are emerging by demanding intelligent technology enhanced by the processing of human language. These new applications often perform tasks which handle information, and they have a capacity to reason, using both formal and human language. Many sub-areas of Artificial Intelligence demand integration of Natural Language Processing, at least to some degree. Furthermore, technologies require coverage of known as well as unknown agents, and tasks with potential variations. All of this takes place in environments with unknown factors. The book covers theoretical work, advanced applications, approaches, and techniques for computational models of information, reasoning systems, and presentation in language. The book promotes work on intelligent natural language processing and related models of information, thought, reasoning, and other cognitive processes. 
The topics covered by the chapters prompt further research and developments of advanced systems in the areas of logic, computability, computational linguistics, cognitive science, neuroscience of language, robotics, and artificial intelligence, among others.}}, author = {{Geierhos, Michaela and Bäumer, Frederik Simon}}, booktitle = {{Partiality and Underspecification in Information, Languages, and Knowledge}}, editor = {{Christiansen, Henning and Jiménez-López, M. Dolores and Loukanova, Roussanka and Moss, Lawrence S.}}, isbn = {{978-1-4438-7947-7}}, pages = {{65--108}}, publisher = {{Cambridge Scholars Publishing}}, title = {{{Guesswork? Resolving Vagueness in User-Generated Software Requirements}}}, year = {{2017}}, } @inproceedings{57, abstract = {{Users prefer natural language software requirements because of their usability and accessibility. Many approaches exist to elaborate these requirements and to support the users during the elicitation process. But there is a lack of adequate resources, which are needed to train and evaluate approaches for requirement refinement. We are trying to close this gap by using online available software descriptions from SourceForge and app stores. Thus, we present two real-life requirements collections based on online-available software descriptions. Our goal is to show the domain-specific characteristics of content words describing functional requirements. On the one hand, we created a semantic role-labeled requirements set, which we use for requirements classification. On the other hand, we enriched software descriptions with linguistic features and dependencies to provide evidence for the context-awareness of software functionalities. 
}}, author = {{Bäumer, Frederik Simon and Dollmann, Markus and Geierhos, Michaela}}, booktitle = {{Proceedings of the 2nd ACM SIGSOFT International Workshop on App Market Analytics}}, editor = {{Sarro, Federica and Shihab, Emad and Nagappan, Meiyappan and Platenius, Marie Christin and Kaimann, Daniel}}, isbn = {{978-1-4503-5158-4}}, location = {{Paderborn, Germany}}, pages = {{19--25}}, publisher = {{ACM}}, title = {{{Studying Software Descriptions in SourceForge and App Stores for a better Understanding of real-life Requirements}}}, doi = {{10.1145/3121264.3121269}}, year = {{2017}}, } @inproceedings{84, abstract = {{The increasing popularity of paradigms like service-oriented computing and cloud computing is leading to a growing amount of service providers offering software components in the form of deployed, ready-to-use services (Software as a Service, SaaS) [14, 20]. In order to discover and select software services, intermediaries apply service matching approaches for determining whether the specification of a provided service satisfies the requester’s requirements. There are already lots of different service matching approaches considering different service properties (structural, behavioral, and non-functional properties). However, each of these approaches alone is not enough to provide a high matching result quality (e.g., accurate matching results) [BOR04]. Thus, such approaches should be combined into a more holistic approach leading to more accurate matching results. However, this combination is a manual, error-prone procedure where many design decisions are made. 
Furthermore, this procedure has to be repeated frequently depending on the context, e.g., to consider different requesters or markets.}}, author = {{Platenius, Marie Christin and Arifulina, Svetlana and Schäfer, Wilhelm}}, booktitle = {{Tagungsband Software Engineering}}, pages = {{81--82}}, title = {{{MatchBox: A Framework for Dynamic Configuration of Service Matching Processes (Extended Abstract)}}}, year = {{2017}}, } @inproceedings{97, abstract = {{Bridging the gap between informal, imprecise, and vague user requirements descriptions and precise formalized specifications is the main task of requirements engineering. Techniques such as interviews or story telling are used when requirements engineers try to identify a user's needs. The requirements specification process is typically done in a dialogue between users, domain experts, and requirements engineers. In our research, we aim at automating the specification of requirements. The idea is to distinguish between untrained users and trained users, and to exploit domain knowledge learned from previous runs of our system. We let untrained users provide unstructured natural language descriptions, while we allow trained users to provide examples of behavioral descriptions. In both cases, our goal is to synthesize formal requirements models similar to statecharts. From requirements specification processes with trained users, behavioral ontologies are learned which are later used to support the requirements specification process for untrained users. Our research method is original in combining natural language processing and search-based techniques for the synthesis of requirements specifications. 
Our work is embedded in a larger project that aims at automating the whole software development and deployment process in envisioned future software service markets.}}, author = {{van Rooijen, Lorijn and Bäumer, Frederik Simon and Platenius, Marie Christin and Geierhos, Michaela and Hamann, Heiko and Engels, Gregor}}, booktitle = {{2017 IEEE 25th International Requirements Engineering Conference Workshops (REW)}}, isbn = {{978-1-5386-3489-9}}, keywords = {{Software, Unified modeling language, Requirements engineering, Ontologies, Search problems, Natural languages}}, location = {{Lisbon, Portugal}}, pages = {{379--385}}, publisher = {{IEEE}}, title = {{{From User Demand to Software Service: Using Machine Learning to Automate the Requirements Specification Process}}}, doi = {{10.1109/REW.2017.26}}, year = {{2017}}, } @inproceedings{98, abstract = {{Today, modern IT-systems are often an interplay of third-party web services. Developers in their role as requesters integrate existing services of different providers into new IT-systems. Providers use frameworks like Open API to create syntactic service specifications from which requesters generate code to integrate services. Proper service discovery is crucial to identify usable services in the growing plethora of third-party services. Most advanced service discovery approaches rely on semantic specifications, e.g., OWL-S. While semantic specification is crucial for a precise discovery, syntactical specification is needed for service invocation. To close the gap between semantic and syntactic specifications, service grounding establishes links between the semantic and syntactic specifications. However, for a large number of web services still no semantic specification or grounding exists. In this paper, we present an approach that semi-automates the semantic specification of web services for service providers and additionally helps service requesters to leverage semantic web services. 
Our approach enables a higher degree of automation than other approaches. This includes the creation of semantic specifications and service groundings for service providers as well as the integration of services for requesters by using our code generator. As proof-of-concept, we provide a case study, where we derive a sophisticated semantic OWL-S specification from a syntactic Open API specification.}}, author = {{Schwichtenberg, Simon and Gerth, Christian and Engels, Gregor}}, booktitle = {{Proceedings of the 24th IEEE International Conference on Web Services (ICWS)}}, pages = {{484--491}}, title = {{{From Open API to Semantic Specifications and Code Adapters}}}, year = {{2017}}, } @article{1098, abstract = {{An end user generally writes down software requirements in ambiguous expressions using natural language; hence, a software developer attuned to programming language finds it difficult to understand the meaning of the requirements. To solve this problem we define semantic categories for disambiguation and classify/annotate the requirement into the categories by using machine-learning models. We extensively use a language frame closely related to such categories for designing features to overcome the problem of insufficient training data compared to the large number of classes. 
Our proposed model obtained a micro-average F1-score of 0.75, outperforming the previous model, REaCT.}}, author = {{Kim, Yeong-Su and Lee, Seung-Woo and Dollmann, Markus and Geierhos, Michaela}}, issn = {{2205-8494}}, journal = {{International Journal of Software Engineering for Smart Device}}, keywords = {{Natural Language Processing, Semantic Annotation, Machine Learning}}, number = {{2}}, pages = {{1--6}}, publisher = {{Global Vision School Publication}}, title = {{{Semantic Annotation of Software Requirements with Language Frame}}}, volume = {{4}}, year = {{2017}}, } @inproceedings{120, abstract = {{Within software engineering, requirements engineering starts from imprecise and vague user requirements descriptions and infers precise, formalized specifications. Techniques, such as interviewing by requirements engineers, are typically applied to identify the user’s needs. We want to partially automate even this first step of requirements elicitation by methods of evolutionary computation. The idea is to enable users to specify their desired software by listing examples of behavioral descriptions. Users initially specify two lists of operation sequences, one with desired behaviors and one with forbidden behaviors. Then, we search for the appropriate formal software specification in the form of a deterministic finite automaton. We solve this problem known as grammatical inference with an active coevolutionary approach following Bongard and Lipson [2]. The coevolutionary process alternates between two phases: (A) additional training data is actively proposed by an evolutionary process and the user is interactively asked to label it; (B) appropriate automata are then evolved to solve this extended grammatical inference problem. 
Our approach leverages multi-objective evolution in both phases and outperforms the state-of-the-art technique [2] for input alphabet sizes of three and more, which are relevant to our problem domain of requirements specification.}}, author = {{Wever, Marcel Dominik and van Rooijen, Lorijn and Hamann, Heiko}}, booktitle = {{Proceedings of the Genetic and Evolutionary Computation Conference (GECCO)}}, pages = {{1327--1334}}, title = {{{Active Coevolutionary Learning of Requirements Specifications from Examples}}}, doi = {{10.1145/3071178.3071258}}, year = {{2017}}, } @inproceedings{176, abstract = {{Users prefer natural language software requirements because of their usability and accessibility. When they describe their wishes for software development, they often provide off-topic information. We therefore present an automated approach for identifying and semantically annotating the on-topic parts of the given descriptions. It is designed to support requirement engineers in the requirement elicitation process on detecting and analyzing requirements in user-generated content. Since no lexical resources with domain-specific information about requirements are available, we created a corpus of requirements written in controlled language by instructed users and uncontrolled language by uninstructed users. We annotated these requirements regarding predicate-argument structures, conditions, priorities, motivations and semantic roles and used this information to train classifiers for information extraction purposes. 
The approach achieves an accuracy of 92% for the on- and off-topic classification task and an F1-measure of 72% for the semantic annotation.}}, author = {{Dollmann, Markus and Geierhos, Michaela}}, booktitle = {{Proceedings of the 2016 Conference on Empirical Methods in Natural Language Processing (EMNLP)}}, location = {{Austin, TX, USA}}, pages = {{1807--1816}}, publisher = {{Association for Computational Linguistics (ACL)}}, title = {{{On- and Off-Topic Classification and Semantic Annotation of User-Generated Software Requirements}}}, year = {{2016}}, } @article{190, abstract = {{Today, software components are provided by global markets in the form of services. In order to optimally satisfy service requesters and service providers, adequate techniques for automatic service matching are needed. However, a requester’s requirements may be vague and the information available about a provided service may be incomplete. As a consequence, fuzziness is induced into the matching procedure. The contribution of this paper is the development of a systematic matching procedure that leverages concepts and techniques from fuzzy logic and possibility theory based on our formal distinction between different sources and types of fuzziness in the context of service matching. In contrast to existing methods, our approach is able to deal with imprecision and incompleteness in service specifications and to inform users about the extent of induced fuzziness in order to improve the user’s decision-making. We demonstrate our approach on the example of specifications for service reputation based on ratings given by previous users. 
Our evaluation based on real service ratings shows the utility and applicability of our approach.}}, author = {{Platenius, Marie Christin and Shaker, Ammar and Becker, Matthias and Hüllermeier, Eyke and Schäfer, Wilhelm}}, journal = {{IEEE Transactions on Software Engineering (TSE), presented at ICSE 2017}}, number = {{8}}, pages = {{739--759}}, publisher = {{IEEE}}, title = {{{Imprecise Matching of Requirements Specifications for Software Services using Fuzzy Logic}}}, doi = {{10.1109/TSE.2016.2632115}}, year = {{2016}}, } @inproceedings{191, abstract = {{One purpose of requirement refinement is that higher-level requirements have to be translated to something usable by developers. Since customer requirements are often written in natural language by end users, they lack precision, completeness and consistency. Although user stories are often used in the requirement elicitation process in order to describe the possibilities how to interact with the software, there is always something unspoken. Here, we present techniques how to automatically refine vague software descriptions. Thus, we can bridge the gap by first revising natural language utterances from higher-level to more detailed customer requirements, before functionality matters. We therefore focus on the resolution of semantically incomplete user-generated sentences (i.e. 
non-instantiated arguments of predicates) and provide ontology-based gap-filling suggestions how to complete unverbalized information in the user’s demand.}}, author = {{Geierhos, Michaela and Bäumer, Frederik Simon}}, booktitle = {{Proceedings of the 21st International Conference on Applications of Natural Language to Information Systems (NLDB)}}, editor = {{Métais, Elisabeth and Meziane, Farid and Saraee, Mohamad and Sugumaran, Vijayan and Vadera, Sunil }}, isbn = {{978-3-319-41753-0}}, keywords = {{Requirement refinement, Concept expansion, Ontology-based instantiation of predicate-argument structure}}, location = {{Salford, UK}}, pages = {{37--47}}, publisher = {{Springer}}, title = {{{How to Complete Customer Requirements: Using Concept Expansion for Requirement Refinement}}}, doi = {{10.1007/978-3-319-41754-7_4}}, volume = {{9612}}, year = {{2016}}, } @techreport{221, author = {{Platenius, Marie Christin and Josifovska, Klementina and van Rooijen, Lorijn and Arifulina, Svetlana and Becker, Matthias and Engels, Gregor and Schäfer, Wilhelm}}, publisher = {{Universität Paderborn}}, title = {{{An Overview of Service Specification Language and Matching in On-The-Fly Computing (v0.3)}}}, year = {{2016}}, } @inproceedings{217, abstract = {{Today, cloud vendors host third party black-box services, whose developers usually provide only textual descriptions or purely syntactical interface specifications. Cloud vendors that give substantial support to other third party developers to integrate hosted services into new software solutions would have a unique selling feature over their competitors. However, to reliably determine if a service is reusable, comprehensive service specifications are needed. 
Characteristic for comprehensive in contrast to syntactical specifications are the formalization of ontological and behavioral semantics, homogeneity according to a global ontology, and a service grounding that links the abstract service description and its technical realization. Homogeneous, semantical specifications enable to reliably identify reusable services, whereas the service grounding is needed for the technical service integration. In general, comprehensive specifications are not availableand have to be derived. Existing automatized approaches are restricted to certain characteristics of comprehensiveness. In my PhD, I consider an automatized approach to derive fully-fledged comprehensive specifications for black-box services. Ontological semantics are derived from syntactical interface specifications. Behavioral semantics are mined from call logs that cloud vendors create to monitor the hosted services. The specifications are harmonized over a global ontology. The service grounding is established using traceability information. The approach enables third party developers to compose services into complex systems and creates new sales channels for cloud and service providers.}}, author = {{Schwichtenberg, Simon}}, booktitle = {{Proceedings of the 38th International Conference on Software Engineering Companion (ICSE)}}, pages = {{815--818}}, title = {{{Automatized Derivation of Comprehensive Specifications for Black-box Services}}}, doi = {{10.1145/2889160.2889271}}, year = {{2016}}, } @inproceedings{169, abstract = {{We apply methods of genetic programming to a general problem from software engineering, namely example-based generation of specifications. In particular, we focus on model transformation by example. The definition and implementation of model transformations is a task frequently carried out by domain experts, hence, a (semi-)automatic approach is desirable. 
This application is challenging because the underlying search space has rich semantics, is high-dimensional, and unstructured. Hence, a computationally brute-force approach would be unscalable and potentially infeasible. To address that problem, we develop a sophisticated approach of designing complex mutation operators. We define ‘patterns’ for constructing mutation operators and report a successful case study. Furthermore, the code of the evolved model transformation is required to have high maintainability and extensibility, that is, the code should be easily readable by domain experts. We report an evaluation of this approach in a software engineering case study.}}, author = {{Kühne, Thomas and Hamann, Heiko and Arifulina, Svetlana and Engels, Gregor}}, booktitle = {{Proceedings of the 19th European Conference on Genetic Programming (EuroGP 2016)}}, pages = {{278--293}}, title = {{{Patterns for Constructing Mutation Operators: Limiting the Search Space in a Software Engineering Application}}}, doi = {{10.1007/978-3-319-30668-1_18}}, year = {{2016}}, } @inproceedings{158, abstract = {{While requirements focus on how the user interacts with the system, user stories concentrate on the purpose of software features. But in practice, functional requirements are also described in user stories. For this reason, requirements clarification is needed, especially when they are written in natural language and do not stick to any templates (e.g., "as an X, I want Y so that Z ..."). However, there is a lot of implicit knowledge that is not expressed in words. As a result, natural language requirements descriptions may suffer from incompleteness. Existing approaches try to formalize natural language or focus only on entirely missing and not on deficient requirements. 
In this paper, we therefore present an approach to detect knowledge gaps in user-generated software requirements for interactive requirement clarification: We provide tailored suggestions to the users in order to get more precise descriptions. For this purpose, we identify not fully instantiated predicate argument structures in requirements written in natural language and use context information to realize what was meant by the user.}}, author = {{Bäumer, Frederik Simon and Geierhos, Michaela}}, booktitle = {{Proceedings of the 22nd International Conference on Information and Software Technologies (ICIST)}}, editor = {{Dregvaite, Giedre and Damasevicius, Robertas }}, isbn = {{978-3-319-46253-0}}, keywords = {{Natural language requirements clarification, Syntactically incomplete requirements, Compensatory user stories}}, location = {{Druskininkai, Lithuania}}, pages = {{549--558}}, publisher = {{Springer}}, title = {{{Running out of Words: How Similar User Stories Can Help to Elaborate Individual Natural Language Requirement Descriptions}}}, doi = {{10.1007/978-3-319-46254-7_44}}, volume = {{639}}, year = {{2016}}, } @inproceedings{160, abstract = {{A task at the beginning of the software development process is the creation of a requirements specification. The requirements specification is usually created by a software engineering expert. We try to substitute this expert by a domain expert (the user) and formulate the problem of creating requirements specifications as a search-based software engineering problem. The domain expert provides only examples of event sequences that describe the behavior of the required software program. These examples are represented by simple sequence diagrams and are divided into two subsets: positive examples of required program behavior and negative examples of prohibited program behavior. The task is then to synthesize a generalized requirements specification that usefully describes the required software. 
We approach this problem by applying a genetic algorithm and evolve deterministic finite automata (DFAs). These DFAs take the sequence diagrams as input that should be either accepted (positive example) or rejected (negative example). The problem is neither to find the minimal nor the most general automaton. Instead, the user should be provided with several appropriate automata from which the user can select, or which help the user to refine the examples given initially. We present the context of our research ("On-The-Fly Computing"), present our approach, report results indicating its feasibility, and conclude with a discussion.}}, author = {{van Rooijen, Lorijn and Hamann, Heiko}}, booktitle = {{Proceedings of 24th IEEE International Requirements Engineering Conference (RE 2016)}}, pages = {{3--9}}, title = {{{Requirements Specification-by-Example Using a Multi-Objective Evolutionary Algorithm}}}, doi = {{10.1109/REW.2016.015}}, year = {{2016}}, } @inproceedings{280, abstract = {{The Collaborative Research Centre "On-The-Fly Computing" works on foundations and principles for the vision of the Future Internet. It proposes the paradigm of On-The-Fly Computing, which tackles emerging worldwide service markets. In these markets, service providers trade software, platform, and infrastructure as a service. Service requesters state requirements on services. To satisfy these requirements, the new role of brokers, who are (human) actors building service compositions on the fly, is introduced. Brokers have to specify service compositions formally and comprehensively using a domain-specific language (DSL), and to use service matching for the discovery of the constituent services available in the market. The broker's choice of the DSL and matching approaches influences her success of building compositions as distinctive properties of different service markets play a significant role. 
In this paper, we propose a new approach of engineering a situation-specific DSL by customizing a comprehensive, modular DSL and its matching for given service market properties. This enables the broker to create market-specific composition specifications and to perform market-specific service matching. As a result, the broker builds service compositions satisfying the requester's requirements more accurately. We evaluated the presented concepts using case studies in service markets for tourism and university management.}}, author = {{Arifulina, Svetlana and Platenius, Marie Christin and Mohr, Felix and Engels, Gregor and Schäfer, Wilhelm}}, booktitle = {{Proceedings of the IEEE 11th World Congress on Services (SERVICES), Visionary Track: Service Composition for the Future Internet}}, pages = {{333--340}}, title = {{{Market-Specific Service Compositions: Specification and Matching}}}, doi = {{10.1109/SERVICES.2015.58}}, year = {{2015}}, } @inbook{293, abstract = {{Opinion mining from physician rating websites depends on the quality of the extracted information. Sometimes reviews are user-error prone and the assigned stars or grades contradict the associated content. We therefore aim at detecting random individual error within reviews. Such errors comprise the disagreement in polarity of review texts and the respective ratings. The challenges that thereby arise are (1) the content and sentiment analysis of the review texts and (2) the removal of the random individual errors contained therein. To solve these tasks, we assign polarities to automatically recognized opinion phrases in reviews and then check for divergence in rating and text polarity. 
The novelty of our approach is that we improve user-generated data quality by excluding error-prone reviews on German physician websites from average ratings.}}, author = {{Geierhos, Michaela and Bäumer, Frederik Simon and Schulze, Sabine and Stuß, Valentina}}, booktitle = {{Proceedings of the 28th International Conference on Industrial, Engineering and Other Applications of Applied Intelligent Systems (IEA/AIE 2015)}}, editor = {{Ali, Moonis and Kwon, Young Sig and Lee, Chang-Hwan and Kim, Juntae and Kim, Yongdai }}, isbn = {{978-3-319-19065-5}}, location = {{Seoul, South Korea}}, pages = {{305--315}}, publisher = {{Springer}}, title = {{{Filtering Reviews by Random Individual Error}}}, doi = {{10.1007/978-3-319-19066-2_30}}, volume = {{9101}}, year = {{2015}}, } @inproceedings{313, abstract = {{Nowadays, many service providers offer software components in the form of Software as a Service. Requesters that want to discover those services in order to use or to integrate them, need to find out which service satisfies their requirements best. For this purpose, service matching approaches determine how well the specifications of provided services satisfy their requirements (including structural, behavioral, and non-functional requirements). In this paper, we describe the tool-suite MatchBox that allows the integration of existing service matchers and their combination as part of flexibly configurable matching processes. Taking requirements and service specifications as an input, MatchBox is able to execute such matching processes and deliver rich matching results. In contrast to related tools, MatchBox allows users to take into account many different kinds of requirements, while it also provides the flexibility to control the matching process in many different ways. 
}}, author = {{Börding, Paul and Bruns, Melanie and Platenius, Marie Christin}}, booktitle = {{10th Joint Meeting of the European Software Engineering Conference and the ACM SIGSOFT Symposium on the Foundations of Software Engineering (ESEC/FSE'15)}}, pages = {{974--977}}, title = {{{Comprehensive Service Matching with MatchBox}}}, doi = {{10.1145/2786805.2803181}}, year = {{2015}}, } @inproceedings{279, abstract = {{Service discovery in global software markets is performed by brokers who act as intermediaries between service consumers and service providers. In order to discover services, brokers apply service matching for determining whether the specification of a provided service satisfies the consumer's requirements. Brokers can already choose between a lot of different service matching approaches considering different service properties (structural, behavioral, and non-functional properties). Different matching approaches can be combined into configurable matching processes leading to a high matching quality (e.g., accurate matching results). However, this combination and configuration is a manual procedure and has to be repeated for different consumers' or market requirements regarding matching quality. 
In this paper, we propose our framework MatchBox, which supports a broker in reusing existing matching approaches and combining them in a model-driven way based on a reconfigurable model of the matching process. Using this reconfigurable model, MatchBox takes care of control and data flow between matching approaches and executes the modeled processes automatically. As a case study, we integrated eleven matchers into MatchBox to demonstrate that it retains flexibility and reduces effort for a broker at the same time.}}, author = {{Platenius, Marie Christin and Arifulina, Svetlana and Schäfer, Wilhelm}}, booktitle = {{Proceedings of the 18th International ACM Sigsoft Symposium on Component-Based Software Engineering (CBSE'15)}}, pages = {{75--84}}, title = {{{MatchBox: A Framework for Dynamic Configuration of Service Matching Processes}}}, doi = {{10.1145/2737166.2737174}}, year = {{2015}}, } @inproceedings{231, abstract = {{Existing approaches towards service composition demand requirements of the customers in terms of service templates, service query profiles, or partial process models. However, addressed non-expert customers may be unable to fill-in the slots of service templates as requested or to describe, for example, pre- and postconditions, or even have difficulties in formalizing their requirements. Thus, our idea is to provide non-experts with suggestions how to complete or clarify their requirement descriptions written in natural language. Two main issues have to be tackled: (1) partial or full inability (incapacity) of non-experts to specify their requirements correctly in formal and precise ways, and (2) problems in text analysis due to fuzziness in natural language. We present ideas how to face these challenges by means of requirement disambiguation and completion. Therefore, we conduct ontology-based requirement extraction and similarity retrieval based on requirement descriptions that are gathered from App marketplaces. 
The innovative aspect of our work is that we support users without expert knowledge in writing their requirements by simultaneously resolving ambiguity, vagueness, and underspecification in natural language.}}, author = {{Geierhos, Michaela and Schulze, Sabine and Bäumer, Frederik Simon}}, booktitle = {{Proceedings of the 7th International Conference on Agents and Artificial Intelligence (ICAART), Special Session on Partiality, Underspecification, and Natural Language Processing (PUaNLP 2015)}}, editor = {{Loiseau, Stephane and Filipe, Joaquim and Duval, Béatrice and van den Herik, Jaap}}, isbn = {{978-989-758-073-4}}, pages = {{277--283}}, publisher = {{SciTePress - Science and Technology Publications}}, title = {{{What did you mean? Facing the Challenges of User-generated Software Requirements}}}, doi = {{10.5220/0005346002770283}}, year = {{2015}}, } @inproceedings{346, abstract = {{One future goal of service-oriented computing is to realize global markets of composed services. On such markets, service providers offer services that can be flexibly combined with each other. However, most often, market participants are not able to individually estimate the quality of traded services in advance. As a consequence, even potentially profitable transactions between customers and providers might not take place. In the worst case, this can induce a market failure. To overcome this problem, we propose the incorporation of reputation information as an indicator for expected service quality. We address On-The-Fly Computing as a representative environment of markets of composed services. In this environment, customers provide feedback on transactions. We present a conceptual design of a reputation system which collects and processes user feedback, and provides it to participants in the market. Our contribution includes the identification of requirements for such a reputation system from a technical and an economic perspective. 
Based on these requirements, we propose a flexible solution that facilitates the incorporation of reputation information into markets of composed services while simultaneously preserving privacy of customers who provide feedback. The requirements we formulate in this paper have just been partially met in literature. An integrated approach, however, has not been addressed yet.}}, author = {{Brangewitz, Sonja and Jungmann, Alexander and Petrlic, Ronald and Platenius, Marie Christin}}, booktitle = {{Proceedings of the 6th International Conferences on Advanced Service Computing (SERVICE COMPUTATION)}}, pages = {{49--57}}, title = {{{Towards a Flexible and Privacy-Preserving Reputation System for Markets of Composed Services}}}, year = {{2014}}, } @inproceedings{364, abstract = {{Today, software components are traded on markets in form of services. These services can also be service compositions consisting of several services. If a software architect wants to provide such a service composition in the market for trade, she needs to perform several tasks: she needs to model the composition, to discover existing services to be part of that composition, and to analyze the composition's functional correctness as well as its quality, e.g., performance. Up to now, the architect needed to find and use different tools for these tasks. Typically, these tools are not interoperable with each other. We provide the tool SeSAME that supports a software architect in all of these tasks. SeSAME is an integrated Eclipse-based tool-suite providing a comprehensive service specification language to model service compositions and existing services. Furthermore, it includes modules for service matching, functional analysis, and non-functional analysis. SeSAME is the first tool that integrates all these tasks into one tool-suite and, thereby, provides holistic support for trading software services. 
Thus, it contributes to a software provider's market success.}}, author = {{Arifulina, Svetlana and Becker, Matthias and Platenius, Marie Christin and Walther, Sven}}, booktitle = {{Proceedings of the 29th IEEE/ACM International Conference on Automated Software Engineering (ASE 2014)}}, pages = {{839--842}}, title = {{{SeSAME: Modeling and Analyzing High-Quality Service Compositions}}}, doi = {{10.1145/2642937.2648621}}, year = {{2014}}, } @inproceedings{366, abstract = {{On-The-Fly (OTF) Computing constitutes an approach towards highly dynamic and individualized software markets. Based on service-oriented computing, OTF Computing is about realizing global markets of services that can be flexibly combined. We report on our current research activities, the security and privacy implications thereof, and our approaches to tackle the challenges. Furthermore, we discuss how the security and privacy challenges are addressed in research projects similar to OTF Computing.}}, author = {{Petrlic, Ronald and Jungmann, Alexander and Platenius, Marie Christin and Schäfer, Wilhelm and Sorge, Christoph}}, booktitle = {{Tagungsband der 4. Konferenz Software-Technologien und -Prozesse (STeP 2014)}}, pages = {{131--142}}, title = {{{Security and Privacy Challenges in On-The-Fly Computing}}}, year = {{2014}}, } @article{375, abstract = {{Many software development, planning, or analysis tasks require an up-to-date software architecture documentation. However, this documentation is often outdated, unavailable, or at least not available as a formal model which analysis tools could use. Reverse engineering methods try to fill this gap. However, as they process the system’s source code, they are easily misled by design deficiencies (e.g., violations of component encapsulation) which leaked into the code during the system’s evolution. 
Despite the high impact of design deficiencies on the quality of the resulting software architecture models, none of the surveyed related works is able to cope with them during the reverse engineering process. Therefore, we have developed the Archimetrix approach which semi-automatically recovers the system’s concrete architecture in a formal model while simultaneously detecting and removing design deficiencies. We have validated Archimetrix on a case-study system and two implementation variants of the CoCoME benchmark system. Results show that the removal of relevant design deficiencies leads to an architecture model which more closely matches the system’s conceptual architecture.}}, author = {{von Detten, Markus and Platenius, Marie Christin and Becker, Steffen}}, journal = {{Journal of Software and Systems Modeling}}, number = {{4}}, pages = {{1239--1268}}, publisher = {{Springer}}, title = {{{Reengineering Component-Based Software Systems with Archimetrix}}}, doi = {{10.1007/s10270-013-0341-9}}, year = {{2014}}, } @article{390, abstract = {{In software markets of the future, customer-specific software will be developed on demand based on distributed software and hardware services. Based on a customer-specific request, available service offers have to be discovered and composed into sophisticated IT services that fulfill the customer's request. A prerequisite of this vision are rich service descriptions, which comprise structural as well as behavioral aspects of the services, otherwise an accurate service discovery and composition is not possible. However, automatic matching of service requests and offers specified in rich service descriptions for the purpose of service discovery is a complex task, due to the multifaceted heterogeneity of the service partners. 
This heterogeneity includes the use of different specification languages, different underlying ontologies, or different levels of granularity in the specification itself. In this article, we present a comprehensive approach for service discovery and composition, which overcomes the underlying heterogeneity of the service partners. Based on a realistic case study of our industrial partner from the e-tourism domain, we first introduce an automatic matching mechanism for service requests and offers specified in a rich service description language. In addition, we propose an automatic service composition approach, which determines possible service compositions by composing the service protocols through a composition strategy based on labeled transition systems. }}, author = {{Huma, Zille and Gerth, Christian and Engels, Gregor}}, journal = {{Computer Science - Research and Development}}, number = {{3-4}}, pages = {{333--361}}, publisher = {{Springer}}, title = {{{On-the-Fly Computing: Automatic Service Discovery and Composition in Heterogeneous Domains}}}, doi = {{10.1007/s00450-014-0254-z}}, year = {{2014}}, } @inproceedings{449, abstract = {{Cloud computing resulted in a continuously growing number of provided software services to be used by consumers. Brokers discover services that fit best to consumers' requirements by matching Quality-of-Service (QoS) properties. In order to negotiate Service-Level Agreements (SLAs), a provider has to determine the provided QoS based on QoS analyses. However, the risk for the provider to violate the SLA is high as the service's actual quality can deviate from the specified QoS due to uncertainties that occur during the provider's quality analysis. In this paper, we discuss current software engineering paradigms like cloud computing and service-oriented computing with respect to the amount of uncertainty they induce into service matching and SLA negotiations. 
As a result, we explain why cloud computing reduces such uncertainties.}}, author = {{Becker, Matthias and Platenius, Marie Christin and Becker, Steffen}}, booktitle = {{Proceedings of the 2nd International Workshop on Cloud Service Brokerage (CSB)}}, pages = {{153--159}}, title = {{{Cloud Computing Reduces Uncertainties in Quality-of-Service Matching!}}}, doi = {{10.1007/978-3-319-14886-1_15}}, year = {{2014}}, } @inproceedings{394, abstract = {{Service Oriented Architectures (SOAs) enable the reuse and substitution of software services to develop highly flexible software systems. To benefit from the growing plethora of available services, sophisticated service discovery approaches are needed that bring service requests and offers together. Such approaches rely on rich service descriptions, which specify also the behavior of provided/requested services, e.g., by pre- and postconditions of operations. As a base for the specification a data schema is used, which specifies the used data types and their relations. However, data schemas are typically heterogeneous wrt. their structure and terminology, since they are created individually in their diverse application contexts. As a consequence the behavioral models that are typed over the heterogeneous data schemas, cannot be compared directly. In this paper, we present a holistic approach to normalize rich service description models to enable behavior-aware service discovery. The approach consists of a matching algorithm that helps to resolve structural and terminological heterogeneity in data schemas by exploiting domain-specific background ontologies. The resulting data schema mappings are represented in terms of Query View Transformation (QVT) relations that even reflect complex n:m correspondences. 
By executing the transformation, behavioral models are automatically normalized, which is a prerequisite for a behavior-aware operation matching.}}, author = {{Schwichtenberg, Simon and Gerth, Christian and Huma, Zille and Engels, Gregor}}, booktitle = {{Proceedings of the 10th European Conference on Modelling Foundations and Applications (ECMFA)}}, pages = {{180--195}}, title = {{{Normalizing Heterogeneous Service Description Models with Generated QVT Transformations}}}, doi = {{10.1007/978-3-319-09195-2_12}}, year = {{2014}}, } @inproceedings{401, abstract = {{Service matching approaches determine to what extent a provided service matches a requester's requirements. This process is based on service specifications describing functional (e.g., signatures) as well as non-functional properties (e.g., privacy policies). However, we cannot expect service specifications to be complete as providers do not want to share all details of their services' implementation. Moreover, creating complete specifications requires much effort. In this paper, we propose a novel service matching approach taking into account a service's signatures and privacy policies. In particular, our approach applies fuzzy matching techniques that are able to deal with incomplete service specifications. As a benefit, decision-making based on matching results is improved and service matching becomes better applicable in practice.}}, author = {{Platenius, Marie Christin and Arifulina, Svetlana and Petrlic, Ronald and Schäfer, Wilhelm}}, booktitle = {{Proceedings of the 4th International Workshop on Adaptive Services for the Future Internet}}, pages = {{6--17}}, title = {{{Matching of Incomplete Service Specifications Exemplified by Privacy Policy Matching}}}, doi = {{10.1007/978-3-319-14886-1_2}}, year = {{2014}}, } @inproceedings{402, abstract = {{Various approaches in service engineering are based on service markets where brokers use service matching in order to perform service discovery. 
For matching, a broker translates the specifications of providers' services and requesters' requirements into her own specification language, in order to check their compliance using a matcher. The broker's success depends on the configuration of her language and its matcher because they influence important properties like the effort for providers and requesters to create suitable specifications as well as accuracy and runtime of matching. However, neither existing service specification languages, nor existing matching approaches are optimized in such way. Our approach automatically provides brokers with an optimal configuration of a language and its matcher to improve her success in a given market with respect to her strategy. The approach is based on formalized configuration properties and a predefined set of configuration rules.}}, author = {{Arifulina, Svetlana and Platenius, Marie Christin and Gerth, Christian and Becker, Steffen and Engels, Gregor and Schäfer, Wilhelm}}, booktitle = {{Proceedings of the 12th International Conference on Service Oriented Computing (ICSOC 2014)}}, editor = {{Franch, Xavier and Ghose, Aditya K. and Lewis, Grace A. and Bhiri, Sami}}, pages = {{543--550}}, title = {{{Market-optimized Service Specification and Matching}}}, doi = {{10.1007/978-3-662-45391-9_47}}, year = {{2014}}, } @inproceedings{409, abstract = {{Service markets provide software components in the form of services. In order to enable a service discovery that satisfies service requesters and providers best, markets need automatic service matching: approaches for comparing whether a provided service satisfies a service request. Current markets, e.g., app markets, are limited to basic keyword-based search although many better suitable matching approaches are described in literature. However, necessary architectural decisions for the integration of matchers have a huge impact on quality properties like performance or security. Architectural decisions wrt. 
service matchers have rarely been discussed, yet, and systematic approaches for their integration into service markets are missing. In this paper, we present a systematic integration approach including the definition of requirements and a discussion on architectural tactics. As a benefit, the decision-making process of integrating service matchers is supported and the overall market success can be improved.}}, author = {{Platenius, Marie Christin and Becker, Steffen and Schäfer, Wilhelm}}, booktitle = {{Proceedings of the 8th European Conference on Software Architecture (ECSA 2014)}}, editor = {{Avgeriou, Paris and Zdun, Uwe}}, pages = {{210--217}}, title = {{{Integrating Service Matchers into a Service Market Architecture}}}, doi = {{10.1007/978-3-319-09970-5_19}}, year = {{2014}}, } @inproceedings{484, abstract = {{One of the main ideas of Service-Oriented Computing (SOC) is the delivery of flexibly composable services provided on world-wide markets. For a successful service discovery, service requests have to be matched with the available service offers. However, in a situation in which no service that completely matches the request can be discovered, the customer may tolerate slight discrepancies between request and offer. Some existing fuzzy matching approaches are able to detect such service variants, but they do not allow to explicitly specify which parts of a request are not mandatory. In this paper, we improve an existing service matching approach based on Visual Contracts leveraging our preliminary work of design pattern detection. 
Thereby, we support explicit specifications of service variants and realize gradual matching results that can be ranked in order to discover the service offer that matches a customer’s request best.}}, author = {{Platenius, Marie Christin and von Detten, Markus and Gerth, Christian and Schäfer, Wilhelm and Engels, Gregor}}, booktitle = {{IEEE 20th International Conference on Web Services (ICWS 2013)}}, pages = {{613--614}}, title = {{{Service Matching under Consideration of Explicitly Specified Service Variants}}}, doi = {{10.1109/ICWS.2013.98}}, year = {{2013}}, } @inproceedings{506, abstract = {{Alle Dom{\"a}nen und Branchen der heutigen Wirtschaft sind auf eine effiziente und effektive Entwicklung von ben{\"o}tigten Softwaresystemen angewiesen. Das 40 Jahre alte Prinzip der Beschaffung von Softwaresystemen durch den Einkauf von teuren, relativ unflexiblen Standardl{\"o}sungen beziehungsweise der noch teureren Erstellung durch Softwareh{\"a}user oder eigene Softwareabteilungen muss deshalb in Frage gestellt werden. Mit dem Einsatz von Cloud Computing-Techniken wird es m{\"o}glich, Softwaresysteme und die f{\"u}r den Betrieb ben{\"o}tigten Ressourcen nur bei Bedarf und nur in der ben{\"o}tigten Form einzukaufen. Mit dem Ansatz der service-orientierten Architekturen stehen Methoden zur Verf{\"u}gung, Software zumindest unternehmensintern flexibel zusammenzustellen. Diese ersten Ans{\"a}tze f{\"u}r eine neue Art der Entwicklung und des Betriebs von Softwaresystemen bilden den Ausgangspunkt f{\"u}r die Forschungen in dem seit 2011 laufenden DFG Sonderforschungsbereich (SFB) 901 „On-The-Fly Computing“ an der Universit{\"a}t Paderborn. Die Vision des On-The-Fly Computing ist, dass die Softwaresysteme der Zukunft aus individuell und automatisch konfigurierten und zur Ausf{\"u}hrung gebrachten Softwarebausteinen bestehen, die auf M{\"a}rkten frei gehandelt werden und flexibel kombinierbar sind. 
Um zu erforschen, in wie weit diese Vision realisierbar ist, werden Konzepte, Methoden und Techniken entwickelt, die eine weitestgehend automatische Konfiguration, Ausf{\"u}hrung und Adaption von Softwaresystemen aus auf weltweiten M{\"a}rkten verf{\"u}gbaren Services erm{\"o}glichen. Um diese Ziele zu erreichen, arbeiten an der Universit{\"a}t Paderborn Informatiker aus unterschiedlichen Disziplinen wie Softwaretechnik, Algorithmik, Rechnernetze, Systementwurf, Sicherheit und Kryptographie mit Wirtschaftswissenschaftlern zusammen, die ihre spezifische Expertise einbringen, mit der die Organisation und Weiterentwicklung des Marktes vorangetrieben werden kann.}}, author = {{Engels, Gregor}}, booktitle = {{Proceedings of the Multikonferenz Software Engineering 2013 (SE 2013)}}, pages = {{17--18}}, title = {{{On-The-Fly Computing -- Das Entwicklungs- und Betriebsparadigma für Softwaresysteme der Zukunft}}}, year = {{2013}}, } @inproceedings{527, abstract = {{In the future vision of software engineering, services from world-wide markets are composed automatically in order to build custom-made systems. Supporting such scenarios requires an adequate service matching approach. Many existing approaches do not fulfill two key requirements of emerging concepts like On-The-Fly-Computing, namely (1) comprehensiveness, i.e., the consideration of different service aspects that cover not only functional properties, but also non-functional properties and (2) fuzzy matching, i.e., the ability to deliver gradual results in order to cope with a certain extent of uncertainty, incompleteness, and tolerance ranges. In this paper, I present a fuzzy matching process that distinguishes between different fuzziness sources and leverages fuzziness in different matching steps which consider different service aspects, e.g., behavior and quality properties. 
}}, author = {{Platenius, Marie Christin}}, booktitle = {{Proceedings of the Doctoral Symposium of the 9th joint meeting of the European Software Engineering Conference (ESEC) and the ACM SIGSOFT Symposium on the Foundations of Software Engineering (FSE)}}, pages = {{715--718}}, title = {{{Fuzzy Service Matching in On-The-Fly Computing}}}, doi = {{10.1145/2491411.2492405}}, year = {{2013}}, } @inproceedings{551, abstract = {{In the service-oriented computing domain, the number of available software services steadily increased in recent years, favored by the rise of cloud computing with its attached delivery models like Software-as-a-Service (SaaS). To fully leverage the opportunities provided by these services for developing highly flexible and aligned SOA, integration of new services as well as the substitution of existing services must be simplified. As a consequence, approaches for automated and accurate service discovery and composition are needed. In this paper, we propose an automatic service composition approach as an extension to our earlier work on automatic service discovery. To ensure accurate results, it matches service requests and available offers based on their structural as well as behavioral aspects. Afterwards, possible service compositions are determined by composing service protocols through a composition strategy based on labeled transition systems.}}, author = {{Huma, Zille and Gerth, Christian and Engels, Gregor and Juwig, Oliver}}, booktitle = {{Proceedings of the 11th International Conference on Service Oriented Computing (ICSOC'13)}}, pages = {{524--532}}, title = {{{Automated Service Composition for On-the-Fly SOAs}}}, doi = {{10.1007/978-3-642-45005-1_42}}, year = {{2013}}, } @inproceedings{560, abstract = {{In the last decades, development turned from monolithic software products towards more flexible software components that can be provided on world-wide markets in form of services. 
Customers request such services or compositions of several services. However, in many cases, discovering the best services to address a given request is a tough challenge and requires expressive, gradual matching results, considering different aspects of a service description, e.g., inputs/outputs, protocols, or quality properties. Furthermore, in situations in which no service exactly satisfies the request, approximate matching which can deal with a certain amount of fuzziness becomes necessary. There is a wealth of service matching approaches, but it is not clear whether there is a comprehensive, fuzzy matching approach which addresses all these challenges. Although there are a few service matching surveys, none of them is able to answer this question. In this paper, we perform a systematic literature survey of 35 (out of 504) service matching approaches which consider fuzzy matching. Based on this survey, we propose a classification, discuss how different matching approaches can be combined into a comprehensive matching method, and identify future research challenges.}}, author = {{Platenius, Marie Christin and von Detten, Markus and Becker, Steffen and Schäfer, Wilhelm and Engels, Gregor}}, booktitle = {{Proceedings of the 16th International ACM Sigsoft Symposium on Component-Based Software Engineering}}, pages = {{143--152}}, title = {{{A Survey of Fuzzy Service Matching Approaches in the Context of On-The-Fly Computing}}}, doi = {{10.1145/2465449.2465454}}, year = {{2013}}, } @inproceedings{572, abstract = {{Service-oriented computing (SOC) promises to solve many issues in the area of distributed software development, e.g. the realization of the loose coupling pattern in practice through service discovery and invocation. For this purpose, service descriptions must comprise structural as well as behavioral information of the services otherwise an accurate service discovery is not possible. 
We addressed this issue in our previous paper and proposed a UML-based rich service description language (RSDL) providing comprehensive notations to specify service requests and offers. However, the automatic matching of service requests and offers specified in a RSDL for the purpose of service discovery is a complex task, due to multifaceted heterogeneity of the service partners. This heterogeneity includes the use of different underlying ontologies or different levels of granularity in the specification itself resulting in complex mappings between service requests and offers. In this paper, we present an automatic matching mechanism for service requests and offers specified in a RSDL that overcomes the underlying heterogeneity of the service partners.}}, author = {{Huma, Zille and Gerth, Christian and Engels, Gregor and Juwig, Oliver}}, booktitle = {{Proceedings of the ACM/IEEE 15th International Conference on Model Driven Engineering Languages and Systems (MoDELS)}}, pages = {{709--725}}, title = {{{Towards an Automatic Service Discovery for UML-based Rich Service Descriptions}}}, doi = {{10.1007/978-3-642-33666-9_45}}, year = {{2012}}, } @inproceedings{573, abstract = {{In software markets of the future, customer-specific software will be developed on demand from distributed software and hardware services available on world-wide markets. Having a request, services have to be automatically discovered and composed. For that purpose, services have to be matched based on their specifications. For the accurate matching, services have to be described comprehensively that requires the integration of different domain-specific languages (DSLs) used for functional, non-functional, and infrastructural properties. Since different service providers use plenty of language dialects to model the same service property, their integration is needed for the matching. In this paper, we propose a framework for integration of DSLs. 
It is based on a parameterized abstract core language that integrates key concepts needed to describe a service. Parts of the core language can be substituted with concrete DSLs. Thus, the framework serves as a basis for the comprehensive specification and automatic matching of services.}}, author = {{Arifulina, Svetlana}}, booktitle = {{Proceedings of the Doctoral Symposium of the 5th International Conference on Software Language Engineering 2012, Dresden, Germany (SLE (Doctoral Symposium))}}, editor = {{Eisenecker, Ulrich W. and Bucholdt, Christian}}, pages = {{23--26}}, title = {{{Towards a Framework for the Integration of Modeling Languages}}}, year = {{2012}}, } @inproceedings{622, abstract = {{Behavioral modeling languages are most useful if their behavior is specified formally such that it can e.g. be analyzed and executed automatically. Obviously, the quality of such behavior specifications is crucial. The rule-based semantics specification technique Dynamic Meta Modeling (DMM) honors this by using the approach of Test-driven Semantics Specification (TDSS), which makes sure that the specification at hand at least describes the correct behavior for a suite of test models. However, in its current state TDSS does not provide any means to measure the quality of such a test suite. In this paper, we describe how we have applied the idea of test coverage to TDSS. Similar to common approaches of defining test coverage criteria, we describe a data structure called invocation graph containing possible orders of applications of DMM rules. Then we define different coverage criteria based on that data structure, taking the rule applications caused by the test suite’s models into account. 
Our implementation of the described approach gives the language engineer using DMM a means to reason about the quality of the language’s test suite, and also provides hints on how to improve that quality by adding dedicated test models to the test suite.}}, author = {{Arifulina, Svetlana and Engels, Gregor and Soltenborn, Christian}}, booktitle = {{Proceedings of the 11th International Workshop on Graph Transformation and Visual Modeling Techniques (GT-VMT)}}, title = {{{Coverage Criteria for Testing DMM Specifications}}}, doi = {{10.14279/tuj.eceasst.47.718}}, year = {{2012}}, } @inproceedings{630, abstract = {{Maintaining software systems requires up-to-date models of these systems to systematically plan, analyse and execute the necessary reengineering steps. Often, no or only outdated models of such systems exist. Thus, a reverse engineering step is needed that recovers the system’s components, subsystems and connectors. However, reverse engineering methods are severely impacted by design deficiencies in the system’s code base, e.g., they lead to wrong component structures. Several approaches exist today for the reverse engineering of component-based systems, however, none of them explicitly integrates a systematic design deficiency removal into the process to improve the quality of the reverse engineered architecture. Therefore, in our Archimetrix approach, we propose to regard the most relevant deficiencies with respect to the reverse engineered component-based architecture and support reengineers by presenting the architectural consequences of removing a given deficiency. 
We validate our approach on the Common Component Modeling Example and show that we are able to identify relevant deficiencies and that their removal leads to an improved reengineered architecture.}}, author = {{Platenius, Marie Christin and von Detten, Markus and Becker, Steffen}}, booktitle = {{Proceedings of the 16th European Conference on Software Maintenance and Reengineering (CSMR)}}, pages = {{255--264}}, title = {{{Archimetrix: Improved Software Architecture Recovery in the Presence of Design Deficiencies}}}, doi = {{10.1109/CSMR.2012.33}}, year = {{2012}}, } @inproceedings{631, abstract = {{Maintaining software systems requires up-to-date models of these systems to systematically plan, analyze, and execute the necessary reengineering steps. Often, no or only outdated models of such systems exist. Thus, a reverse engineering step is needed that recovers the system's components, subsystems, and connectors. However, reverse engineering methods are severely impacted by design deficiencies in the system's code base, e.g., they lead to wrong component structures. Therefore, Archimetrix enables the reengineer to detect the most relevant deficiencies with respect to a reverse engineered component-based architecture and supports him by presenting the architectural consequences of removing a given deficiency.}}, author = {{von Detten, Markus}}, booktitle = {{Proceedings of the 19th Working Conference on Reverse Engineering (WCRE)}}, pages = {{503--504}}, title = {{{Archimetrix: A Tool for Deficiency-Aware Software Architecture Reconstruction}}}, doi = {{10.1109/WCRE.2012.61}}, year = {{2012}}, } @inproceedings{639, abstract = {{Service-oriented computing (SOC) emerges as a promising trend solving many issues in distributed software development. Following the essence of SOC, service descriptions are defined by the service partners based on current standards, e.g., WSDL [15]. 
However, these standards are mostly structural and do not provide any behavioral description, which may lead to inaccurate service discovery results. There is a requirement for a rich service description language for service partners that encompasses the structural as well as behavioral information in the service description. Furthermore, service discovery based on an automatic matching of these comprehensive service descriptions is a complex task, which is further complicated through the heterogeneity of the service partners' domains in terms of different underlying ontologies. In this paper, we propose a rich service description language based on UML, which allows the specification of structural and behavioral features of a service. In addition, we also briefly discuss how some existing matching approaches can be extended to define an automatic matching mechanism for rich service descriptions resolving the underlying heterogeneity.}}, author = {{Huma, Zille and Gerth, Christian and Engels, Gregor and Juwig, Oliver}}, booktitle = {{Proceedings of the Forum at the CAiSE'12 Conference on Advanced Information Systems Engineering}}, pages = {{90--97}}, title = {{{A UML-based Rich Service Description for Automatic Service Discovery}}}, year = {{2012}}, } @inproceedings{644, abstract = {{In reverse engineering, dynamic pattern detection is accomplished by collecting execution traces and comparing them to expected behavioral patterns. The traces are collected by manually executing the program in question and therefore represent only part of all relevant program behavior. This can lead to false conclusions about the detected patterns. In this paper, we propose to generate all relevant program traces by using symbolic execution. 
In order to reduce the created trace data, we allow to limit the trace collection to a user-selectable subset of the statically detected pattern candidates.}}, author = {{von Detten, Markus}}, booktitle = {{Proceedings of the 10th ACM SIGPLAN/SIGSOFT Workshop on Program Analysis for Software Tools and Engineering (PASTE)}}, pages = {{17--20}}, title = {{{Towards Systematic, Comprehensive Trace Generation for Behavioral Pattern Detection through Symbolic Execution}}}, doi = {{10.1145/2024569.2024573}}, year = {{2011}}, } @inproceedings{652, abstract = {{In the development process of service-oriented systems, business process models are used at different levels. Typically, high-level business process models that describe business requirements and needs are stepwise refined to the IT level by different business modelers and software architects. As a result, different process model versions must be compared and merged by means of model version control. An important prerequisite for process model version control is an elaborated matching approach that results in precise mappings between different process model versions. The challenge of such an approach is to deal with syntactically different process models that are semantically equivalent. For that purpose, matching techniques must consider the semantics of process modeling languages. In this paper, we present a matching approach for process models in a versioning scenario. Based on a term formalization of process models, we enable an efficient and effective way to match syntactically different but semantically equivalent process models resulting in precise mappings.}}, author = {{Gerth, Christian and Luckey, Markus and Küster, Jochen and Engels, Gregor}}, booktitle = {{Proceedings of the IEEE 8th International Conference on Service Computing (SCC)}}, pages = {{218--225}}, title = {{{Precise Mappings between Business Process Models in Versioning Scenarios}}}, doi = {{10.1109/SCC.2011.65}}, year = {{2011}}, }