@article{25390,
  author       = {Moussallem, Diego and Arcan, Mihael and Ngonga Ngomo, Axel-Cyrille and Buitelaar, Paul},
  journal      = {CoRR},
  title        = {Augmenting Neural Machine Translation with Knowledge Graphs},
  volume       = {abs/1902.08816},
  eprint       = {1902.08816},
  eprinttype   = {arXiv},
  year         = {2019},
}

@article{25391,
  author       = {Amer Desouki, Abdelmoneim and Röder, Michael and Ngonga Ngomo, Axel-Cyrille},
  journal      = {CoRR},
  title        = {{topFiberM}: Scalable and Efficient {Boolean} Matrix Factorization},
  volume       = {abs/1903.10326},
  eprint       = {1903.10326},
  eprinttype   = {arXiv},
  year         = {2019},
}

@article{25392,
  author       = {Moussallem, Diego and Wauer, Matthias and Ngonga Ngomo, Axel-Cyrille},
  journal      = {CoRR},
  title        = {{Semantic Web} for Machine Translation: Challenges and Directions},
  volume       = {abs/1907.10676},
  eprint       = {1907.10676},
  eprinttype   = {arXiv},
  year         = {2019},
}

@article{25393,
  author       = {Ngonga Ngomo, Axel-Cyrille and Moussallem, Diego and Bühmann, Lorenz},
  journal      = {CoRR},
  title        = {A Holistic Natural Language Generation Framework for the {Semantic Web}},
  volume       = {abs/1911.01248},
  eprint       = {1911.01248},
  eprinttype   = {arXiv},
  year         = {2019},
}

@article{25394,
  author       = {Röder, Michael and de Souza, Geraldo and Kuchelev, Denis and Amer Desouki, Abdelmoneim and Ngonga Ngomo, Axel-Cyrille},
  journal      = {CoRR},
  title        = {{ORCA}: A Benchmark for {Data Web} Crawlers},
  volume       = {abs/1912.08026},
  eprint       = {1912.08026},
  eprinttype   = {arXiv},
  year         = {2019},
}

@inproceedings{29037,
  abstract     = {Existing technologies employ different machine learning approaches to predict disasters from historical environmental data. However, for short-term disasters (e.g., earthquakes), historical data alone has a limited prediction capability. In this work, we consider social media as a supplementary source of knowledge in addition to historical environmental data. Further, we build a joint model that learns from disaster-related tweets and environmental data to improve prediction. We propose the combination of semantically-enriched word embedding to represent entities in tweets with their semantics representations computed with the traditional word2vec. Our experiments show that our proposed approach outperforms the accuracy of state-of-the-art models in disaster prediction.},
  author       = {Zahera, Hamada Mohamed Abdelsamee and Sherif, Mohamed and Ngonga Ngomo, Axel-Cyrille},
  booktitle    = {K-CAP 2019: Knowledge Capture Conference},
  keywords     = {sys:relevantFor:infai sys:relevantFor:bis sys:relevantFor:simba ngonga simba zahera sherif solide limboproject opal group\_aksw dice},
  pages        = {4},
  title        = {Jointly Learning from Social Media and Environmental Data for Typhoon Intensity Prediction},
  year         = {2019},
}

@inproceedings{29011,
  abstract     = {In this paper we present LimesWebUI, our web interface of Limes. Limes, the Link Discovery Framework for Metric Spaces, is a framework for discovering links between entities contained in Linked Data sources. LimesWebUI assists the end user during the link discovery process. By representing the link specifications (LS) as interlocking blocks, our interface eases the manual creation of links for users who already know which LS they would like to execute. However, most users do not know which LS suits their linking task best and therefore need help throughout this process. Hence, our interface provides wizards which allow the easy configuration of many link discovery machine learning algorithms, that does not require the user to enter a manual LS. We evaluate the usability of the interface by using the standard system usability scale questionnaire. Our overall usability score of 76.5 suggests that the online interface is consistent, easy to use, and the various functions of the system are well integrated.},
  author       = {Sherif, Mohamed and Pestryakova, Svetlana and Dreßler, Kevin and Ngonga Ngomo, Axel-Cyrille},
  booktitle    = {18th International Semantic Web Conference (ISWC 2019)},
  keywords     = {2019 sys:relevantFor:infai group\_aksw simba sherif kevin ngonga Svetlana slipo limes dice sage limbo opal},
  publisher    = {CEUR-WS.org},
  title        = {{LimesWebUI} -- Link Discovery Made Simple},
  year         = {2019},
}

@inproceedings{29038,
  abstract     = {An increasing number of heterogeneous datasets abiding by the Linked Data paradigm is published everyday. Discovering links between these datasets is thus central to achieving the vision behind the Data Web. Declarative Link Discovery (LD) frameworks rely on complex Link Specification (LS) to express the conditions under which two resources should be linked. Complex LS combine similarity measures with thresholds to determine whether a given predicate holds between two resources. State of the art LD frameworks rely mostly on string-based similarity measures such as Levenshtein and Jaccard. However, string-based similarity measures often fail to catch the similarity of resources with phonetically similar property values when these property values are represented using different string representation (e.g., names and street labels). In this paper, we evaluate the impact of using phonetics-based similarities in the process of LD. Moreover, we evaluate the impact of phonetic-based similarity measures on a state-of-the-art machine learning approach used to generate LS. Our experiments suggest that the combination of string-based and phonetic-based measures can improve the F-measures achieved by LD frameworks on most datasets.},
  author       = {Ahmed, Abdullah Fathi and Sherif, Mohamed and Ngonga Ngomo, Axel-Cyrille},
  booktitle    = {K-CAP 2019: Knowledge Capture Conference},
  keywords     = {sys:relevantFor:infai sys:relevantFor:bis sys:relevantFor:ngonga ahmed sherif solide limboproject opal group\_aksw dice},
  title        = {Do your Resources Sound Similar? On the Impact of Using Phonetic Similarity in Link Discovery},
  year         = {2019},
}

@inproceedings{29012,
  abstract     = {An increasing number and size of datasets abiding by the Linked Data paradigm are published everyday. Discovering links between these datasets is thus central to achieve the vision behind the Data Web. Declarative Link Discovery (LD) frameworks rely on complex Link Specification (LS) to express the conditions under which two resources should be linked. Understanding such LS is not a trivial task for non-expert users, particularly when such users are interested in generating LS to match their needs. Even if the user applies a machine learning algorithm for the automatic generation of the required LS, the challenge of explaining the resultant LS persists. Hence, providing explainable LS is the key challenge to enable users who are unfamiliar with underlying LS technologies to use them effectively and efficiently. In this paper, we address this problem by proposing a generic approach that allows a LS to be verbalized, i.e., converted into understandable natural language. We propose a summarization approach to the verbalized LS based on the selectivity of the underlying LS. Our adequacy and fluency evaluations show that our approach can generate complete and easily understandable natural language descriptions even by lay users.},
  author       = {Ahmed, Abdullah Fathi and Sherif, Mohamed and Ngonga Ngomo, Axel-Cyrille},
  booktitle    = {24th International Conference on Applications of Natural Language to Information Systems (NLDB 2019)},
  keywords     = {2019 sys:relevantFor:infai group\_aksw simba sherif ngonga ahmed slipo limes dice sage limbo opal},
  publisher    = {Springer},
  title        = {{LSVS}: Link Specification Verbalization and Summarization},
  year         = {2019},
}

@inproceedings{29013,
  abstract     = {Point of Interest (POI) data constitute the cornerstone of any application, service or product even remotely related to our physical surroundings. From navigation applications to social networks, tourism, and logistics, we use POI data to search, communicate, decide and plan our actions. POIs are semantically diverse and spatio-temporally evolving entities, having geographical, temporal and thematic relations. Currently, integrating POI data to increase their coverage, timeliness, accuracy and value is a resource-intensive and mostly manual process, with no specialized software available to address the specific challenges of this task. In this paper, we present an integrated toolkit for transforming, linking, fusing and enriching POI data, and extracting additional value from them. In particular, we demonstrate how Linked Data technologies can address the limitations, gaps and challenges of the current landscape in Big POI data integration. We have built a prototype application that enables users to define, manage and execute scalable POI data integration workflows built on top of state-of-the-art software for geospatial Linked Data. The application abstracts and hides away the underlying complexity, automates quality-assured integration, scales efficiently for world-scale integration tasks and lowers the entry barrier for end-users. Validated against real-world POI datasets in several application domains, our system has shown great potential to address the requirements and needs of cross-sector, cross-border and cross-lingual integration of Big POI data.},
  author       = {Athanasiou, Spiros and Giannopoulos, Giorgos and Graux, Damien and Karagiannakis, Nikos and Lehmann, Jens and Ngonga Ngomo, Axel-Cyrille and Patroumpas, Kostas and Sherif, Mohamed and Skoutas, Dimitrios},
  booktitle    = {International Conference on Extending Database Technology 2019, EDBT19},
  keywords     = {2019 sys:relevantFor:infai group\_aksw simba sherif ngonga lehmann slipo limes dice deer},
  title        = {Big {POI} data integration with {Linked Data} technologies},
  year         = {2019},
}

@incollection{56579,
  abstract     = {Question answering engines have become one of the most popular types of applications driven by Semantic Web technologies. Consequently, the provision of means to quantify the performance of current question answering approaches on current datasets has become ever more important. However, a large percentage of the queries found in popular question answering benchmarks cannot be executed on current versions of their reference dataset. There is consequently a clear need to curate question answering benchmarks periodically. However, the manual alteration of question answering benchmarks is often error-prone. We alleviate this problem by presenting QUANT, a novel framework for the creation and curation of question answering benchmarks. QUANT supports the curation of benchmarks by generating smart edit suggestions for question-query pair and for the corresponding metadata. In addition, our framework supports the creation of new benchmark entries by providing predefined quality checks for queries. We evaluate QUANT on 653 questions obtained from QALD-1 to QALD-8 with 10 users. Our results show that our framework generates reliable suggestions and can reduce the curation effort for QA benchmarks by up to 91%.},
  author       = {Gusmita, Ria Hari and Jalota, Rricha and Vollmers, Daniel and Reineke, Jan and Ngonga Ngomo, Axel-Cyrille and Usbeck, Ricardo},
  booktitle    = {Semantic Systems. The Power of AI and Knowledge Graphs},
  editor       = {Acosta, Maribel and Cudr{\'e}-Mauroux, Philippe and Maleshkova, Maria and Pellegrini, Tassilo and Sack, Harald and Sure-Vetter, York},
  isbn         = {978-3-030-33219-8},
  issn         = {0302-9743},
  keywords     = {Benchmark, Question answering, Knowledge base},
  location     = {Karlsruhe, Germany},
  pages        = {343--358},
  publisher    = {Springer International Publishing},
  title        = {{QUANT} - Question Answering Benchmark Curator},
  doi          = {10.1007/978-3-030-33220-4_25},
  year         = {2019},
}

@inproceedings{57287,
  author       = {Syed, Zafar Habeeb and Srivastava, Nikit and Röder, Michael and Ngonga Ngomo, Axel-Cyrille},
  booktitle    = {Proceedings of the ISWC 2019 Satellite Tracks (Posters \& Demonstrations, Industry, and Outrageous Ideas)},
  editor       = {Suárez-Figueroa, Mari Carmen and Cheng, Gong and Gentile, Anna Lisa and Guéret, Christophe and Keet, Maria and Bernstein, Abraham},
  keywords     = {dice group\_aksw ngonga roeder srivastava syed},
  pages        = {201--204},
  publisher    = {Springer International Publishing},
  title        = {{COPAAL} -- An Interface for Explaining Facts using Corroborative Paths},
  volume       = {2456},
  year         = {2019},
}

@incollection{57286,
  author       = {Jalota, Rricha and Srivastava, Nikit and Vollmers, Daniel and Speck, René and Röder, Michael and Usbeck, Ricardo and Ngonga Ngomo, Axel-Cyrille},
  booktitle    = {Rich Search and Discovery for Research Datasets},
  keywords     = {dice jalota ngonga roeder speck srivastava vollmers},
  publisher    = {SAGE Publications},
  title        = {Finding Datasets in Publications: The {University of Paderborn} Approach},
  year         = {2019},
}

@article{26589,
  author       = {Ahmed Sherif, Mohamed and Ngonga Ngomo, Axel-Cyrille},
  journal      = {Semantic Web},
  number       = {5},
  pages        = {589--604},
  title        = {A Systematic Survey of Point Set Distance Measures for Link Discovery},
  doi          = {10.3233/SW-170285},
  volume       = {9},
  year         = {2018},
}

@article{26590,
  author       = {Röder, Michael and Usbeck, Ricardo and Ngonga Ngomo, Axel-Cyrille},
  journal      = {Semantic Web},
  number       = {5},
  pages        = {605--625},
  title        = {{GERBIL} -- Benchmarking Named Entity Recognition and Linking Consistently},
  doi          = {10.3233/SW-170286},
  volume       = {9},
  year         = {2018},
}

@article{26591,
  author       = {Saleem, Muhammad and Hasnain, Ali and Ngonga Ngomo, Axel-Cyrille},
  journal      = {J. Web Semant.},
  pages        = {85--125},
  title        = {{LargeRDFBench}: A Billion Triples Benchmark for {SPARQL} Endpoint Federation},
  doi          = {10.1016/j.websem.2017.12.005},
  volume       = {48},
  year         = {2018},
}

@article{26592,
  author       = {Moussallem, Diego and Wauer, Matthias and Ngonga Ngomo, Axel-Cyrille},
  journal      = {J. Web Semant.},
  pages        = {1--19},
  title        = {Machine Translation using {Semantic Web} Technologies: A Survey},
  doi          = {10.1016/j.websem.2018.07.001},
  volume       = {51},
  year         = {2018},
}

@inproceedings{26593,
  author       = {Habeeb Syed, Zafar and Röder, Michael and Ngonga Ngomo, Axel-Cyrille},
  booktitle    = {Proceedings of the 27th {ACM} International Conference on Information and Knowledge Management, {CIKM} 2018, Torino, Italy, October 22-26, 2018},
  editor       = {Cuzzocrea, Alfredo and Allan, James and Paton, Norman W. and Srivastava, Divesh and Agrawal, Rakesh and Broder, Andrei Z. and Zaki, Mohammed J. and Candan, K. Selçuk and Labrinidis, Alexandros and Schuster, Assaf and Wang, Haixun},
  pages        = {1599--1602},
  publisher    = {{ACM}},
  title        = {{FactCheck}: Validating {RDF} Triples Using Textual Evidence},
  doi          = {10.1145/3269206.3269308},
  year         = {2018},
}

@inproceedings{26594,
  author       = {Akhter, Adnan and Ngonga Ngomo, Axel-Cyrille and Saleem, Muhammad},
  booktitle    = {Knowledge Engineering and Knowledge Management - 21st International Conference, {EKAW} 2018, Nancy, France, November 12-16, 2018, Proceedings},
  editor       = {Faron-Zucker, Catherine and Ghidini, Chiara and Napoli, Amedeo and Toussaint, Yannick},
  pages        = {3--18},
  publisher    = {Springer},
  title        = {An Empirical Evaluation of {RDF} Graph Partitioning Techniques},
  doi          = {10.1007/978-3-030-03667-6_1},
  volume       = {11313},
  year         = {2018},
}

@inproceedings{26595,
  author       = {Speck, René and Ngonga Ngomo, Axel-Cyrille},
  booktitle    = {Knowledge Engineering and Knowledge Management - 21st International Conference, {EKAW} 2018, Nancy, France, November 12-16, 2018, Proceedings},
  editor       = {Faron-Zucker, Catherine and Ghidini, Chiara and Napoli, Amedeo and Toussaint, Yannick},
  pages        = {424--438},
  publisher    = {Springer},
  title        = {On Extracting Relations Using Distributional Semantics and a Tree Generalization},
  doi          = {10.1007/978-3-030-03667-6_27},
  volume       = {11313},
  year         = {2018},
}

