@inproceedings{62007,
  abstract     = {{Ensemble methods are widely employed to improve generalization in machine learning. This has also prompted the adoption of ensemble learning for the knowledge graph embedding (KGE) models in performing link prediction. Typical approaches to this end train multiple models as part of the ensemble, and the diverse predictions are then averaged. However, this approach has some significant drawbacks. For instance, the computational overhead of training multiple models increases latency and memory overhead. In contrast, model merging approaches offer a promising alternative that does not require training multiple models. In this work, we introduce model merging, specifically weighted averaging, in KGE models. Herein, a running average of model parameters from a training epoch onward is maintained and used for predictions. To address this, we additionally propose an approach that selectively updates the running average of the ensemble model parameters only when the generalization performance improves on a validation dataset. We evaluate these two different weighted averaging approaches on link prediction tasks, comparing the state-of-the-art benchmark ensemble approach. Additionally, we evaluate the weighted averaging approach considering literal-augmented KGE models and multi-hop query answering tasks as well. The results demonstrate that the proposed weighted averaging approach consistently improves performance across diverse evaluation settings.}},
  author       = {{Sapkota, Rupesh and Demir, Caglar and Sharma, Arnab and Ngonga Ngomo, Axel-Cyrille}},
  booktitle    = {{Proceedings of the Thirteenth International Conference on Knowledge Capture (K-CAP 2025)}},
  keywords     = {{Knowledge Graphs, Embeddings, Ensemble Learning}},
  location     = {{Dayton, OH, USA}},
  publisher    = {{ACM}},
  title        = {{{Parameter Averaging in Link Prediction}}},
  doi          = {{10.1145/3731443.3771365}},
  year         = {{2025}},
}

@inproceedings{57240,
  abstract     = {{Validating assertions before adding them to a knowledge graph is an essential part of its creation and maintenance. Due to the sheer size of knowledge graphs, automatic fact-checking approaches have been developed. These approaches rely on reference knowledge to decide whether a given assertion is correct. Recent hybrid approaches achieve good results by including several knowledge sources. However, it is often impractical to provide a sheer quantity of textual knowledge or generate embedding models to leverage these hybrid approaches. We present FaVEL, an approach that uses algorithm selection and ensemble learning to amalgamate several existing fact-checking approaches that rely solely on a reference knowledge graph and, hence, use fewer resources than current hybrid approaches. For our evaluation, we create updated versions of two existing datasets and a new dataset dubbed FaVEL-DS. Our evaluation compares our approach to 15 fact-checking approaches—including the state-of-the-art approach HybridFC—on 3 datasets. Our results demonstrate that FaVEL outperforms all other approaches significantly by at least 0.04 in terms of the area under the ROC curve. Our source code, datasets, and evaluation results are open-source and can be found at https://github.com/dice-group/favel.}},
  author       = {{Qudus, Umair and Röder, Michael and Tatkeu Pekarou, Franck Lionel and Morim da Silva, Ana Alexandra and Ngonga Ngomo, Axel-Cyrille}},
  booktitle    = {{EKAW 2024}},
  editor       = {{Rospocher, Marco}},
  keywords     = {{fact checking, ensemble learning, transfer learning, knowledge management}},
  location     = {{Amsterdam, Netherlands}},
  title        = {{{FaVEL: Fact Validation Ensemble Learning}}},
  year         = {{2024}},
}

@inproceedings{50479,
  abstract     = {{Verifying assertions is an essential part of creating and maintaining knowledge graphs. Most often, this task cannot be carried out manually due to the sheer size of modern knowledge graphs. Hence, automatic fact-checking approaches have been proposed over the last decade. These approaches aim to compute automatically whether a given assertion is correct or incorrect. However, most fact-checking approaches are binary classifiers that fail to consider the volatility of some assertions, i.e., the fact that such assertions are only valid at certain times or for specific time intervals. Moreover, the few approaches able to predict when an assertion was valid (i.e., time-point prediction approaches) rely on manual feature engineering. This paper presents TEMPORALFC, a temporal fact-checking approach that uses multiple sources of background knowledge to assess the veracity and temporal validity of a given assertion. We evaluate TEMPORALFC on two datasets and compare it to the state of the art in fact-checking and time-point prediction. Our results suggest that TEMPORALFC outperforms the state of the art on the fact-checking task by 0.13 to 0.15 in terms of Area Under the Receiver Operating Characteristic curve and on the time-point prediction task by 0.25 to 0.27 in terms of Mean Reciprocal Rank. Our code is open-source and can be found at https://github.com/dice-group/TemporalFC.}},
  author       = {{Qudus, Umair and Röder, Michael and Kirrane, Sabrina and Ngonga Ngomo, Axel-Cyrille}},
  booktitle    = {{The Semantic Web -- ISWC 2023}},
  editor       = {{R. Payne, Terry and Presutti, Valentina and Qi, Guilin and Poveda-Villalón, María and Stoilos, Giorgos and Hollink, Laura and Kaoudi, Zoi and Cheng, Gong and Li, Juanzi}},
  isbn         = {{9783031472398}},
  issn         = {{0302-9743}},
  keywords     = {{temporal fact checking, ensemble learning, transfer learning, time-point prediction, temporal knowledge graphs}},
  location     = {{Athens, Greece}},
  pages        = {{465--483}},
  publisher    = {{Springer, Cham}},
  series       = {{Lecture Notes in Computer Science}},
  title        = {{{TemporalFC: A Temporal Fact Checking Approach over Knowledge Graphs}}},
  doi          = {{10.1007/978-3-031-47240-4_25}},
  volume       = {{14265}},
  year         = {{2023}},
}

@inproceedings{32509,
  abstract     = {{We consider fact-checking approaches that aim to predict the veracity of assertions in knowledge graphs. Five main categories of fact-checking approaches for knowledge graphs have been proposed in the recent literature, of which each is subject to partially overlapping limitations. In particular, current text-based approaches are limited by manual feature engineering. Path-based and rule-based approaches are limited by their exclusive use of knowledge graphs as background knowledge, and embedding-based approaches suffer from low accuracy scores on current fact-checking tasks. We propose a hybrid approach—dubbed HybridFC—that exploits the diversity of existing categories of fact-checking approaches within an ensemble learning setting to achieve a significantly better prediction performance. In particular, our approach outperforms the state of the art by 0.14 to 0.27 in terms of Area Under the Receiver Operating Characteristic curve on the FactBench dataset. Our code is open-source and can be found at https://github.com/dice-group/HybridFC.}},
  author       = {{Qudus, Umair and Röder, Michael and Saleem, Muhammad and Ngonga Ngomo, Axel-Cyrille}},
  booktitle    = {{The Semantic Web -- ISWC 2022}},
  editor       = {{Sattler, Ulrike and Hogan, Aidan and Keet, Maria and Presutti, Valentina}},
  isbn         = {{978-3-031-19433-7}},
  keywords     = {{fact checking, ensemble learning, knowledge graph veracity}},
  location     = {{Hangzhou, China}},
  pages        = {{462--480}},
  publisher    = {{Springer International Publishing}},
  title        = {{{HybridFC: A Hybrid Fact-Checking Approach for Knowledge Graphs}}},
  doi          = {{10.1007/978-3-031-19433-7_27}},
  year         = {{2022}},
}

