@inproceedings{29047,
  author       = {Wilke, Adrian and Bannoura, Arwa and Ngonga Ngomo, Axel-Cyrille},
  booktitle    = {2021 {IEEE} 15th International Conference on Semantic Computing ({ICSC})},
  pages        = {241--247},
  title        = {Relicensing Combined Datasets},
  doi          = {10.1109/ICSC50631.2021.00050},
  year         = {2021},
}

@inproceedings{29486,
  author       = {Firmansyah, Asep Fajar and Moussallem, Diego and Ngonga Ngomo, Axel-Cyrille},
  booktitle    = {Proceedings of the 11th Knowledge Capture Conference ({K-CAP} 2021)},
  isbn         = {978-1-4503-8457-5},
  pages        = {73--80},
  publisher    = {ACM},
  title        = {{GATES}: Using Graph Attention Networks for Entity Summarization},
  doi          = {10.1145/3460210.3493574},
  year         = {2021},
}

@inbook{29292,
  author       = {Feldhans, Robert and Wilke, Adrian and Heindorf, Stefan and Shaker, Mohammad Hossein and Hammer, Barbara and Ngonga Ngomo, Axel-Cyrille and Hüllermeier, Eyke},
  booktitle    = {Intelligent Data Engineering and Automated Learning -- {IDEAL} 2021},
  isbn         = {9783030916077},
  issn         = {0302-9743},
  publisher    = {Springer International Publishing},
  title        = {Drift Detection in Text Data with Document Embeddings},
  doi          = {10.1007/978-3-030-91608-4_11},
  year         = {2021},
}

@inproceedings{29287,
  abstract     = {Knowledge graph embedding research has mainly focused on the two smallest
normed division algebras, $\mathbb{R}$ and $\mathbb{C}$. Recent results suggest
that trilinear products of quaternion-valued embeddings can be a more effective
means to tackle link prediction. In addition, models based on convolutions on
real-valued embeddings often yield state-of-the-art results for link
prediction. In this paper, we investigate a composition of convolution
operations with hypercomplex multiplications. We propose the four approaches
QMult, OMult, ConvQ and ConvO to tackle the link prediction problem. QMult and
OMult can be considered as quaternion and octonion extensions of previous
state-of-the-art approaches, including DistMult and ComplEx. ConvQ and ConvO
build upon QMult and OMult by including convolution operations in a way
inspired by the residual learning framework. We evaluated our approaches on
seven link prediction datasets including WN18RR, FB15K-237 and YAGO3-10.
Experimental results suggest that the benefits of learning hypercomplex-valued
vector representations become more apparent as the size and complexity of the
knowledge graph grows. ConvO outperforms state-of-the-art approaches on
FB15K-237 in MRR, Hit@1 and Hit@3, while QMult, OMult, ConvQ and ConvO
outperform state-of-the-approaches on YAGO3-10 in all metrics. Results also
suggest that link prediction performances can be further improved via
prediction averaging. To foster reproducible research, we provide an
open-source implementation of approaches, including training and evaluation
scripts as well as pretrained models.},
  author       = {Demir, Caglar and Moussallem, Diego and Heindorf, Stefan and Ngonga Ngomo, Axel-Cyrille},
  booktitle    = {The 13th Asian Conference on Machine Learning, {ACML} 2021},
  title        = {Convolutional Hypercomplex Embeddings for Link Prediction},
  year         = {2021},
}

@inproceedings{29294,
  author       = {Nickchen, Tobias and Heindorf, Stefan and Engels, Gregor},
  booktitle    = {2021 {IEEE} Winter Conference on Applications of Computer Vision ({WACV})},
  publisher    = {IEEE},
  title        = {Generating Physically Sound Training Data for Image Recognition of Additively Manufactured Parts},
  doi          = {10.1109/wacv48630.2021.00204},
  year         = {2021},
}

@article{24456,
  abstract     = {One objective of current research in explainable intelligent systems is to implement social aspects in order to increase the relevance of explanations. In this paper, we argue that a novel conceptual framework is needed to overcome shortcomings of existing AI systems with little attention to processes of interaction and learning. Drawing from research in interaction and development, we first outline the novel conceptual framework that pushes the design of AI systems toward true interactivity with an emphasis on the role of the partner and social relevance. We propose that AI systems will be able to provide a meaningful and relevant explanation only if the process of explaining is extended to active contribution of both partners that brings about dynamics that is modulated by different levels of analysis. Accordingly, our conceptual framework comprises monitoring and scaffolding as key concepts and claims that the process of explaining is not only modulated by the interaction between explainee and explainer but is embedded into a larger social context in which conventionalized and routinized behaviors are established. We discuss our conceptual framework in relation to the established objectives of transparency and autonomy that are raised for the design of explainable AI systems currently.},
  author       = {Rohlfing, Katharina J. and Cimiano, Philipp and Scharlau, Ingrid and Matzner, Tobias and Buhl, Heike M. and Buschmeier, Hendrik and Esposito, Elena and Grimminger, Angela and Hammer, Barbara and Haeb-Umbach, Reinhold and Horwath, Ilona and Hüllermeier, Eyke and Kern, Friederike and Kopp, Stefan and Thommes, Kirsten and Ngonga Ngomo, Axel-Cyrille and Schulte, Carsten and Wachsmuth, Henning and Wagner, Petra and Wrede, Britta},
  issn         = {2379-8920},
  journal      = {IEEE Transactions on Cognitive and Developmental Systems},
  keywords     = {Explainability, process of explaining and understanding, explainable artificial systems},
  number       = {3},
  pages        = {717--728},
  title        = {Explanation as a Social Practice: Toward a Conceptual Framework for the Social Design of {AI} Systems},
  doi          = {10.1109/tcds.2020.3044366},
  volume       = {13},
  year         = {2021},
}

@inproceedings{29291,
  author       = {Zahera, Hamada Mohamed Abdelsamee and Heindorf, Stefan and Ngonga Ngomo, Axel-Cyrille},
  booktitle    = {Proceedings of the 11th Knowledge Capture Conference ({K-CAP} 2021)},
  publisher    = {ACM},
  title        = {{ASSET}: A Semi-supervised Approach for Entity Typing in Knowledge Graphs},
  doi          = {10.1145/3460210.3493563},
  year         = {2021},
}

@inbook{54586,
  author       = {Ali, Manzoor and Saleem, Muhammad and Ngonga Ngomo, Axel-Cyrille},
  booktitle    = {The Semantic Web: {ESWC} 2021 Satellite Events},
  isbn         = {9783030804176},
  issn         = {0302-9743},
  publisher    = {Springer International Publishing},
  title        = {Unsupervised Relation Extraction Using Sentence Encoding},
  doi          = {10.1007/978-3-030-80418-3_25},
  year         = {2021},
}

@article{45844,
  abstract     = {Abductive reasoning is a non-monotonic formalism stemming from the work of Peirce. It describes the process of deriving the most plausible explanations of known facts. Considering the positive version, asking for sets of variables as explanations, we study, besides the problem of whether there exists a set of explanations, two explanation size limited variants of this reasoning problem (less than or equal to, and equal to a given size bound). In this paper, we present a thorough two-dimensional classification of these problems: the first dimension is regarding the parameterized complexity under a wealth of different parameterizations, and the second dimension spans through all possible Boolean fragments of these problems in Schaefer’s constraint satisfaction framework with co-clones (T. J. Schaefer. The complexity of satisfiability problems. In Proceedings of the 10th Annual ACM Symposium on Theory of Computing, May 1–3, 1978, San Diego, California, USA, R.J. Lipton, W.A. Burkhard, W.J. Savitch, E.P. Friedman, A.V. Aho eds, pp. 216–226. ACM, 1978). Thereby, we almost complete the parameterized complexity classification program initiated by Fellows et al. (The parameterized complexity of abduction. In Proceedings of the Twenty-Sixth AAAI Conference on Articial Intelligence, July 22–26, 2012, Toronto, Ontario, Canada, J. Homann, B. Selman eds. AAAI Press, 2012), partially building on the results by Nordh and Zanuttini (What makes propositional abduction tractable. Artificial Intelligence, 172, 1245–1284, 2008). In this process, we outline a fine-grained analysis of the inherent parameterized intractability of these problems and pinpoint their FPT parts. As the standard algebraic approach is not applicable to our problems, we develop an alternative method that makes the algebraic tools partially available again.},
  author       = {Mahmood, Yasir and Meier, Arne and Schmidt, Johannes},
  issn         = {0955-792X},
  journal      = {Journal of Logic and Computation},
  keywords     = {Logic, Hardware and Architecture, Arts and Humanities (miscellaneous), Software, Theoretical Computer Science},
  number       = {1},
  pages        = {266--296},
  publisher    = {Oxford University Press (OUP)},
  title        = {Parameterized complexity of abduction in {Schaefer}’s framework},
  doi          = {10.1093/logcom/exaa079},
  volume       = {31},
  year         = {2021},
}

@article{25212,
  abstract     = {Finding a good query plan is key to the optimization of query runtime. This holds in particular for cost-based federation
engines, which make use of cardinality estimations to achieve this goal. A number of studies compare SPARQL federation engines across different performance metrics, including query runtime, result set completeness and correctness, number of sources selected and number of requests sent. Albeit informative, these metrics are generic and unable to quantify and evaluate the accuracy of the cardinality estimators of cost-based federation engines. To thoroughly evaluate cost-based federation engines, the effect of estimated cardinality errors on the overall query runtime performance must be measured. In this paper, we address this challenge by presenting novel evaluation metrics targeted at a fine-grained benchmarking of cost-based federated SPARQL query engines. We evaluate five cost-based federated SPARQL query engines using existing as well as novel evaluation metrics by using LargeRDFBench queries. Our results provide a detailed analysis of the experimental outcomes that reveal novel insights, useful for the development of future cost-based federated SPARQL query processing engines.},
  author       = {Qudus, Umair and Saleem, Muhammad and Ngonga Ngomo, Axel-Cyrille and Lee, Young-Koo},
  issn         = {2210-4968},
  journal      = {Semantic Web},
  keywords     = {SPARQL, benchmarking, cost-based, cost-free, federated, querying},
  number       = {6},
  pages        = {843--868},
  publisher    = {IOS Press},
  title        = {An Empirical Evaluation of Cost-based Federated {SPARQL} Query Processing Engines},
  doi          = {10.3233/SW-200420},
  volume       = {12},
  year         = {2021},
}

@inbook{54587,
  abstract     = {With significant growth in RDF datasets, application developers demand online availability of these datasets to meet the end users’ expectations. Various interfaces are available for querying RDF data using SPARQL query language. Studies show that SPARQL end-points may provide high query runtime performance at the cost of low availability. For example, it has been observed that only 32.2% of public endpoints have a monthly uptime of 99–100%. One possible reason for this low availability is the high workload experienced by these SPARQL endpoints. As complete query execution is performed at server side (i.e., SPARQL endpoint), this high query processing workload may result in performance degradation or even a service shutdown. We performed extensive experiments to show the query processing capabilities of well-known triple stores by using their SPARQL endpoints. In particular, we stressed these triple stores with multiple parallel requests from different querying agents. Our experiments revealed the maximum query processing capabilities of these triple stores after which point they lead to service shutdowns. We hope this analysis will help triple store developers to design workload-aware RDF engines to improve the availability of their public endpoints with high throughput.},
  author       = {Khan, Hashim and Ali, Manzoor and Ngonga Ngomo, Axel-Cyrille and Saleem, Muhammad},
  booktitle    = {Studies on the Semantic Web},
  issn         = {1868-1158},
  publisher    = {IOS Press},
  title        = {When is the Peak Performance Reached? An Analysis of {RDF} Triple Stores},
  doi          = {10.3233/ssw210042},
  year         = {2021},
}

@phdthesis{16935,
  author       = {Moussallem, Diego Campos},
  school       = {Paderborn University},
  title        = {Knowledge Graphs for Multilingual Language Translation and Generation},
  doi          = {10.17619/UNIPB/1-980},
  year         = {2020},
}

@inproceedings{20141,
  author       = {Heindorf, Stefan and Scholten, Yan and Wachsmuth, Henning and Ngonga Ngomo, Axel-Cyrille and Potthast, Martin},
  booktitle    = {Proceedings of the 29th {ACM} International Conference on Information and Knowledge Management ({CIKM} 2020)},
  pages        = {3023--3030},
  title        = {{CauseNet}: Towards a Causality Graph Extracted from the {Web}},
  doi          = {10.1145/3340531.3412763},
  year         = {2020},
}

@inbook{29042,
  author       = {Röder, Michael and Sherif, Mohamed and Saleem, Muhammad and Conrads, Felix and Ngonga Ngomo, Axel-Cyrille},
  booktitle    = {Knowledge Graphs for eXplainable Artificial Intelligence: Foundations, Applications and Challenges},
  editor       = {Tiddi, Ilaria and Lécué, Freddy and Hitzler, Pascal},
  keywords     = {dice group_aksw roeder sherif saleem fconrads ngonga},
  pages        = {73--97},
  publisher    = {IOS Press},
  title        = {Benchmarking the Lifecycle of Knowledge Graphs},
  doi          = {10.3233/SSW200012},
  year         = {2020},
}

@inproceedings{15921,
  abstract     = {Ranking plays a central role in a large number of applications driven by RDF knowledge graphs. Over the last years, many popular RDF knowledge graphs have grown so large that rankings for the facts they contain cannot be computed directly using the currently common 64-bit platforms. In this paper, we tackle two problems:
Computing ranks on such large knowledge bases efficiently and incrementally. First, we present D-HARE, a distributed approach for computing ranks on very large knowledge graphs. D-HARE assumes the random surfer model and relies on data partitioning to compute matrix multiplications and transpositions on disk for matrices of arbitrary size. Moreover, the data partitioning underlying D-HARE allows the execution of most of its steps in parallel.
As very large knowledge graphs are often updated periodically, we tackle the incremental computation of ranks on large knowledge bases as a second problem. We address this problem by presenting
I-HARE, an approximation technique for calculating the overall ranking scores of a knowledge without the need to recalculate the ranking from scratch at each new revision. We evaluate our approaches by calculating ranks on the 3 × 10^9 and 2.4 × 10^9 triples from Wikidata resp. LinkedGeoData. Our evaluation demonstrates
that D-HARE is the first holistic approach for computing ranks on very large RDF knowledge graphs. In addition, our incremental approach achieves a root mean squared error of less than 10E−7 in the best case. Both D-HARE
 and I-HARE are open-source and are available at: https://github.com/dice-group/incrementalHARE.
},
  author       = {Desouki, Abdelmoneim Amer and Röder, Michael and Ngonga Ngomo, Axel-Cyrille},
  booktitle    = {Proceedings of the 30th {ACM} Conference on Hypertext and Social Media - {HT} '19},
  isbn         = {9781450368858},
  keywords     = {Knowledge Graphs, Ranking, RDF},
  pages        = {163--171},
  publisher    = {ACM},
  title        = {Ranking on Very Large Knowledge Graphs},
  doi          = {10.1145/3342220.3343660},
  year         = {2019},
}

@inproceedings{46539,
  abstract     = {This paper describes the Ontology Alignment Evaluation Initiative 2017.5 pre-campaign. Like in 2012, when we transitioned the evaluation to the SEALS platform, we have also conducted a pre-campaign to assess the feasibility of moving to the HOBBIT platform. We report the experiences of this precampaign and discuss the future steps for the OAEI.},
  author       = {Jiménez-Ruiz, Ernesto and Saveta, Tzanina and Zamazal, Ondrej and Hertling, Sven and Röder, Michael and Fundulaki, Irini and Ngonga Ngomo, Axel-Cyrille and Sherif, Mohamed and Annane, Amina and Bellahsene, Zohra and Yahia, Sadok Ben and Diallo, Gayo and Faria, Daniel and Kachroudi, Marouen and Khiat, Abderrahmane and Lambrix, Patrick and Li, Huanyu and Mackeprang, Maximilian and Mohammadi, Majid and Rybinski, Maciej and Balasubramani, Booma Sowkarthiga and Trojahn, Cassia},
  booktitle    = {Proceedings of the Ontology Matching Workshop 2018},
  keywords     = {2018 DICE SIMBA group_aksw ngonga projecthobbit roeder sherif},
  title        = {Introducing the {HOBBIT} platform into the Ontology Alignment Evaluation Campaign},
  year         = {2018},
}

@inbook{57234,
  author       = {Meise, Bianca and Schloots, Franziska Margarete and Müller-Lietzkow, Jörg and Meister, Dorothee M.},
  booktitle    = {Interdisziplinäre Perspektiven zur Zukunft der Wertschöpfung},
  isbn         = {9783658202644},
  publisher    = {Springer Fachmedien Wiesbaden},
  title        = {{Interdisziplinäres Projektmanagement – Strategische Handlungsempfehlungen für Kooperationsverbünde in akademischen Kontexten}},
  doi          = {10.1007/978-3-658-20265-1_18},
  year         = {2017},
}

