@inbook{32179,
  abstract     = {{This work addresses the automatic resolution of software requirements. In the vision of On-The-Fly Computing, software services should be composed on demand, based solely on natural language input from human users. To enable this, we build a chatbot solution that works with human-in-the-loop support to receive, analyze, correct, and complete their software requirements. The chatbot is equipped with a natural language processing pipeline and a large knowledge base, as well as sophisticated dialogue management skills to enhance the user experience. Previous solutions have focused on analyzing software requirements to point out errors such as vagueness, ambiguity, or incompleteness. Our work shows how apps can collaborate with users to efficiently produce correct requirements. We developed and compared three different chatbot apps that can work with built-in knowledge. We rely on ChatterBot, DialoGPT and Rasa for this purpose. While DialoGPT provides its own knowledge base, Rasa is the best system to combine the text mining and knowledge solutions at our disposal. The evaluation shows that users accept 73% of the suggested answers from Rasa, while they accept only 63% from DialoGPT or even 36% from ChatterBot.}},
  author       = {{Kersting, Joschka and Ahmed, Mobeen and Geierhos, Michaela}},
  booktitle    = {{HCI International 2022 Posters}},
  editor       = {{Stephanidis, Constantine and Antona, Margherita and Ntoa, Stavroula}},
  isbn         = {{9783031064166}},
  issn         = {{1865-0929}},
  keywords     = {{On-The-Fly Computing, Chatbot, Knowledge Base}},
  location     = {{Virtual}},
  pages        = {{419--426}},
  publisher    = {{Springer International Publishing}},
  title        = {{Chatbot-Enhanced Requirements Resolution for Automated Service Compositions}},
  doi          = {{10.1007/978-3-031-06417-3_56}},
  volume       = {{1580}},
  year         = {{2022}},
}

@inbook{56579,
  abstract     = {{Question answering engines have become one of the most popular type of applications driven by Semantic Web technologies. Consequently, the provision of means to quantify the performance of current question answering approaches on current datasets has become ever more important. However, a large percentage of the queries found in popular question answering benchmarks cannot be executed on current versions of their reference dataset. There is consequently a clear need to curate question answering benchmarks periodically. However, the manual alteration of question answering benchmarks is often error-prone. We alleviate this problem by presenting QUANT, a novel framework for the creation and curation of question answering benchmarks. QUANT supports the curation of benchmarks by generating smart edit suggestions for question-query pair and for the corresponding metadata. In addition, our framework supports the creation of new benchmark entries by providing predefined quality checks for queries. We evaluate QUANT on 653 questions obtained from QALD-1 to QALD-8 with 10 users. Our results show that our framework generates reliable suggestions and can reduce the curation effort for QA benchmarks by up to 91%.}},
  author       = {{Gusmita, Ria Hari and Jalota, Rricha and Vollmers, Daniel and Reineke, Jan and Ngonga Ngomo, Axel-Cyrille and Usbeck, Ricardo}},
  booktitle    = {{Semantic Systems. The Power of AI and Knowledge Graphs}},
  editor       = {{Acosta, Maribel and Cudr{\'e}-Mauroux, Philippe and Maleshkova, Maria and Pellegrini, Tassilo and Sack, Harald and Sure-Vetter, York}},
  isbn         = {{978-3-030-33219-8}},
  issn         = {{0302-9743}},
  keywords     = {{Benchmark, Question answering, Knowledge base}},
  location     = {{Karlsruhe, Germany}},
  pages        = {{343--358}},
  publisher    = {{Springer International Publishing}},
  title        = {{QUANT - Question Answering Benchmark Curator}},
  doi          = {{10.1007/978-3-030-33220-4_25}},
  year         = {{2019}},
}

