@inproceedings{57240,
  abstract     = {Validating assertions before adding them to a knowledge graph is an essential part of its creation and maintenance. Due to the sheer size of knowledge graphs, automatic fact-checking approaches have been developed. These approaches rely on reference knowledge to decide whether a given assertion is correct. Recent hybrid approaches achieve good results by including several knowledge sources. However, it is often impractical to provide a sheer quantity of textual knowledge or generate embedding models to leverage these hybrid approaches. We present FaVEL, an approach that uses algorithm selection and ensemble learning to amalgamate several existing fact-checking approaches that rely solely on a reference knowledge graph and, hence, use fewer resources than current hybrid approaches. For our evaluation, we create updated versions of two existing datasets and a new dataset dubbed FaVEL-DS. Our evaluation compares our approach to 15 fact-checking approaches—including the state-of-the-art approach HybridFC—on 3 datasets. Our results demonstrate that FaVEL outperforms all other approaches significantly by at least 0.04 in terms of the area under the ROC curve. Our source code, datasets, and evaluation results are open-source and can be found at https://github.com/dice-group/favel.},
  author       = {Qudus, Umair and Röder, Michael and Tatkeu Pekarou, Franck Lionel and Morim da Silva, Ana Alexandra and Ngonga Ngomo, Axel-Cyrille},
  booktitle    = {{EKAW} 2024},
  editor       = {Rospocher, Marco},
  keywords     = {fact checking, ensemble learning, transfer learning, knowledge management},
  location     = {Amsterdam, Netherlands},
  title        = {{FaVEL}: Fact Validation Ensemble Learning},
  year         = {2024},
}

@inproceedings{50479,
  abstract     = {Verifying assertions is an essential part of creating and maintaining knowledge graphs. Most often, this task cannot be carried out manually due to the sheer size of modern knowledge graphs. Hence, automatic fact-checking approaches have been proposed over the last decade. These approaches aim to compute automatically whether a given assertion is correct or incorrect. However, most fact-checking approaches are binary classifiers that fail to consider the volatility of some assertions, i.e., the fact that such assertions are only valid at certain times or for specific time intervals. Moreover, the few approaches able to predict when an assertion was valid (i.e., time-point prediction approaches) rely on manual feature engineering. This paper presents TEMPORALFC, a temporal fact-checking approach that uses multiple sources of background knowledge to assess the veracity and temporal validity of a given assertion. We evaluate TEMPORALFC on two datasets and compare it to the state of the art in fact-checking and time-point prediction. Our results suggest that TEMPORALFC outperforms the state of the art on the fact-checking task by 0.13 to 0.15 in terms of Area Under the Receiver Operating Characteristic curve and on the time-point prediction task by 0.25 to 0.27 in terms of Mean Reciprocal Rank. Our code is open-source and can be found at https://github.com/dice-group/TemporalFC.},
  author       = {Qudus, Umair and Röder, Michael and Kirrane, Sabrina and Ngonga Ngomo, Axel-Cyrille},
  booktitle    = {The Semantic Web – {ISWC} 2023},
  editor       = {Payne, Terry R. and Presutti, Valentina and Qi, Guilin and Poveda-Villalón, María and Stoilos, Giorgos and Hollink, Laura and Kaoudi, Zoi and Cheng, Gong and Li, Juanzi},
  isbn         = {9783031472398},
  issn         = {0302-9743},
  keywords     = {temporal fact checking, ensemble learning, transfer learning, time-point prediction, temporal knowledge graphs},
  location     = {Athens, Greece},
  pages        = {465--483},
  publisher    = {Springer, Cham},
  series       = {Lecture Notes in Computer Science},
  title        = {{TemporalFC}: A Temporal Fact Checking Approach over Knowledge Graphs},
  doi          = {10.1007/978-3-031-47240-4_25},
  volume       = {14265},
  year         = {2023},
}

@inproceedings{46248,
  author       = {Demir, Caglar and Wiebesiek, Michel and Lu, Renzhong and Ngonga Ngomo, Axel-Cyrille and Heindorf, Stefan},
  booktitle    = {{ECML PKDD}},
  location     = {Torino},
  title        = {{LitCQD}: Multi-Hop Reasoning in Incomplete Knowledge Graphs with Numeric Literals},
  year         = {2023},
  internal-note = {entry type changed from @article (conference name was in the journal field); TODO: replace booktitle with the full proceedings title},
}

@inproceedings{33734,
  abstract     = {Many applications require explainable node classification in knowledge graphs. Towards this end, a popular ``white-box'' approach is class expression learning: Given sets of positive and negative nodes, class expressions in description logics are learned that separate positive from negative nodes. Most existing approaches are search-based approaches generating many candidate class expressions and selecting the best one. However, they often take a long time to find suitable class expressions. In this paper, we cast class expression learning as a translation problem and propose a new family of class expression learning approaches which we dub neural class expression synthesizers. Training examples are ``translated'' into class expressions in a fashion akin to machine translation. Consequently, our synthesizers are not subject to the runtime limitations of search-based approaches. We study three instances of this novel family of approaches based on LSTMs, GRUs, and set transformers, respectively. An evaluation of our approach on four benchmark datasets suggests that it can effectively synthesize high-quality class expressions with respect to the input examples in approximately one second on average. Moreover, a comparison to state-of-the-art approaches suggests that we achieve better F-measures on large datasets. For reproducibility purposes, we provide our implementation as well as pretrained models in our public GitHub repository at https://github.com/dice-group/NeuralClassExpressionSynthesis},
  author       = {Kouagou, N'Dah Jean and Heindorf, Stefan and Demir, Caglar and Ngonga Ngomo, Axel-Cyrille},
  booktitle    = {The Semantic Web - 20th Extended Semantic Web Conference ({ESWC} 2023)},
  editor       = {Pesquita, Cátia and Jiménez-Ruiz, Ernesto and McCusker, Jamie and Faria, Daniel and Dragoni, Mauro and Dimou, Anastasia and Troncy, Raphaël and Hertling, Sven},
  keywords     = {Neural network, Concept learning, Description logics},
  location     = {Hersonissos, Crete, Greece},
  pages        = {209--226},
  publisher    = {Springer International Publishing},
  series       = {Lecture Notes in Computer Science},
  title        = {Neural Class Expression Synthesis},
  doi          = {10.1007/978-3-031-33455-9_13},
  volume       = {13870},
  year         = {2023},
}

@inproceedings{46243,
  author       = {Demir, Caglar and Ngonga Ngomo, Axel-Cyrille},
  booktitle    = {{ECML PKDD}},
  location     = {Torino},
  title        = {{Clifford} Embeddings – A Generalized Approach for Embedding in Normed Algebras},
  year         = {2023},
  internal-note = {entry type changed from @article (conference name was in the journal field); venue spelling normalized to match entry 46248; TODO: replace booktitle with the full proceedings title},
}

@inproceedings{46251,
  author       = {Demir, Caglar and Ngonga Ngomo, Axel-Cyrille},
  booktitle    = {International Joint Conference on Artificial Intelligence},
  location     = {Macau},
  title        = {Neuro-Symbolic Class Expression Learning},
  year         = {2023},
  internal-note = {entry type changed from @article (conference name was in the journal field)},
}

@inproceedings{32509,
  abstract     = {We consider fact-checking approaches that aim to predict the veracity of assertions in knowledge graphs. Five main categories of fact-checking approaches for knowledge graphs have been proposed in the recent literature, of which each is subject to partially overlapping limitations. In particular, current text-based approaches are limited by manual feature engineering. Path-based and rule-based approaches are limited by their exclusive use of knowledge graphs as background knowledge, and embedding-based approaches suffer from low accuracy scores on current fact-checking tasks. We propose a hybrid approach—dubbed HybridFC—that exploits the diversity of existing categories of fact-checking approaches within an ensemble learning setting to achieve a significantly better prediction performance. In particular, our approach outperforms the state of the art by 0.14 to 0.27 in terms of Area Under the Receiver Operating Characteristic curve on the FactBench dataset. Our code is open-source and can be found at https://github.com/dice-group/HybridFC.},
  author       = {Qudus, Umair and Röder, Michael and Saleem, Muhammad and Ngonga Ngomo, Axel-Cyrille},
  booktitle    = {The Semantic Web -- {ISWC} 2022},
  editor       = {Sattler, Ulrike and Hogan, Aidan and Keet, Maria and Presutti, Valentina},
  isbn         = {978-3-031-19433-7},
  keywords     = {fact checking, ensemble learning, knowledge graph veracity},
  location     = {Hangzhou, China},
  pages        = {462--480},
  publisher    = {Springer International Publishing},
  title        = {{HybridFC}: A Hybrid Fact-Checking Approach for Knowledge Graphs},
  doi          = {10.1007/978-3-031-19433-7_27},
  year         = {2022},
}

@article{25212,
  abstract     = {Finding a good query plan is key to the optimization of query runtime. This holds in particular for cost-based federation engines, which make use of cardinality estimations to achieve this goal. A number of studies compare SPARQL federation engines across different performance metrics, including query runtime, result set completeness and correctness, number of sources selected and number of requests sent. Albeit informative, these metrics are generic and unable to quantify and evaluate the accuracy of the cardinality estimators of cost-based federation engines. To thoroughly evaluate cost-based federation engines, the effect of estimated cardinality errors on the overall query runtime performance must be measured. In this paper, we address this challenge by presenting novel evaluation metrics targeted at a fine-grained benchmarking of cost-based federated SPARQL query engines. We evaluate five cost-based federated SPARQL query engines using existing as well as novel evaluation metrics by using LargeRDFBench queries. Our results provide a detailed analysis of the experimental outcomes that reveal novel insights, useful for the development of future cost-based federated SPARQL query processing engines.},
  author       = {Qudus, Umair and Saleem, Muhammad and Ngonga Ngomo, Axel-Cyrille and Lee, Young-Koo},
  issn         = {2210-4968},
  journal      = {Semantic Web},
  keywords     = {SPARQL, benchmarking, cost-based, cost-free, federated, querying},
  number       = {6},
  pages        = {843--868},
  publisher    = {IOS Press},
  title        = {An Empirical Evaluation of Cost-based Federated {SPARQL} Query Processing Engines},
  doi          = {10.3233/SW-200420},
  volume       = {12},
  year         = {2021},
}

