@inbook{62701,
  abstract     = {{Learning continuous vector representations for knowledge graphs has significantly improved state-of-the-art performances in many challenging tasks. Yet, deep-learning-based models are only post-hoc and locally explainable. In contrast, learning Web Ontology Language (OWL) class expressions in Description Logics (DLs) is ante-hoc and globally explainable. However, state-of-the-art learners have two well-known limitations: scaling to large knowledge graphs and handling missing information. Here, we present a decision-tree-based learner (tDL) to learn Web Ontology Languages (OWLs) class expressions over large knowledge graphs, while imputing missing triples. Given positive and negative example individuals, tDL firstly constructs unique OWL expressions in $\mathcal{SHOIN}$ from concise bounded descriptions of individuals. Each OWL class expression is used as a feature in a binary classification problem to represent input individuals. Thereafter, tDL fits a CART decision tree to learn Boolean decision rules distinguishing positive examples from negative examples. A final OWL expression in $\mathcal{SHOIN}$ is built by traversing the built CART decision tree from the root node to leaf nodes for each positive example. By this, tDL can learn OWL class expressions without exploration, i.e., the number of queries to a knowledge graph is bounded by the number of input individuals. Our empirical results show that tDL outperforms the current state-of-the-art models across datasets. Importantly, our experiments over a large knowledge graph (DBpedia with 1.1 billion triples) show that tDL can effectively learn accurate OWL class expressions, while the state-of-the-art models fail to return any results. Finally, expressions learned by tDL can be seamlessly translated into natural language explanations using a pre-trained large language model and a DL verbalizer.}},
  author       = {{Demir, Caglar and Yekini, Moshood and Röder, Michael and Mahmood, Yasir and Ngonga Ngomo, Axel-Cyrille}},
  booktitle    = {{Lecture Notes in Computer Science}},
  isbn         = {{9783032060655}},
  issn         = {{0302-9743}},
  keywords     = {{Decision Tree, OWL Class Expression Learning, Description Logic, Knowledge Graph, Large Language Model, Verbalizer}},
  location     = {{Porto, Portugal}},
  publisher    = {{Springer Nature Switzerland}},
  title        = {{{Tree-Based OWL Class Expression Learner over Large Graphs}}},
  doi          = {{10.1007/978-3-032-06066-2_29}},
  year         = {{2025}},
}

@inproceedings{61041,
  abstract     = {{Large Language Models (LLMs) are increasingly deployed in real-world applications that require access to up-to-date knowledge. However, retraining LLMs is computationally expensive. Therefore, knowledge editing techniques are crucial for maintaining current information and correcting erroneous assertions within pre-trained models. Current benchmarks for knowledge editing primarily focus on recalling edited facts, often neglecting their logical consequences. To address this limitation, we introduce a new benchmark designed to evaluate how knowledge editing methods handle the logical consequences of a single fact edit. Our benchmark extracts relevant logical rules from a knowledge graph for a given edit. Then, it generates multi-hop questions based on these rules to assess the impact on logical consequences. Our findings indicate that while existing knowledge editing approaches can accurately insert direct assertions into LLMs, they frequently fail to inject entailed knowledge. Specifically, experiments with popular methods like ROME and FT reveal a substantial performance gap, up to 24%, between evaluations on directly edited knowledge and on entailed knowledge. This highlights the critical need for semantics-aware evaluation frameworks in knowledge editing.}},
  author       = {{Moteu Ngoli, Tatiana and Kouagou, N'Dah Jean and Zahera, Hamada Mohamed Abdelsamee and Ngonga Ngomo, Axel-Cyrille}},
  booktitle    = {{Proceedings of the 24th International Semantic Web Conference (ISWC 2025)}},
  isbn         = {{978-3-032-09530-5}},
  keywords     = {{dice sailproject moteu kouagou zahera ngonga}},
  location     = {{Nara, Japan}},
  pages        = {{41--56}},
  publisher    = {{Springer, Cham}},
  title        = {{{Benchmarking Knowledge Editing using Logical Rules}}},
  doi          = {{10.1007/978-3-032-09530-5_3}},
  year         = {{2025}},
}

@inproceedings{62007,
  abstract     = {{Ensemble methods are widely employed to improve generalization in machine learning. This has also prompted the adoption of ensemble learning for the knowledge graph embedding (KGE) models in performing link prediction. Typical approaches to this end train multiple models as part of the ensemble, and the diverse predictions are then averaged. However, this approach has some significant drawbacks. For instance, the computational overhead of training multiple models increases latency and memory overhead. In contrast, model merging approaches offer a promising alternative that does not require training multiple models. In this work, we introduce model merging, specifically weighted averaging, in KGE models. Herein, a running average of model parameters from a training epoch onward is maintained and used for predictions. To address this, we additionally propose an approach that selectively updates the running average of the ensemble model parameters only when the generalization performance improves on a validation dataset. We evaluate these two different weighted averaging approaches on link prediction tasks, comparing the state-of-the-art benchmark ensemble approach. Additionally, we evaluate the weighted averaging approach considering literal-augmented KGE models and multi-hop query answering tasks as well. The results demonstrate that the proposed weighted averaging approach consistently improves performance across diverse evaluation settings.}},
  author       = {{Sapkota, Rupesh and Demir, Caglar and Sharma, Arnab and Ngonga Ngomo, Axel-Cyrille}},
  booktitle    = {{Proceedings of the Thirteenth International Conference on Knowledge Capture (K-CAP 2025)}},
  keywords     = {{Knowledge Graphs, Embeddings, Ensemble Learning}},
  location     = {{Dayton, OH, USA}},
  publisher    = {{ACM}},
  title        = {{{Parameter Averaging in Link Prediction}}},
  doi          = {{10.1145/3731443.3771365}},
  year         = {{2025}},
}

@inproceedings{56213,
  author       = {{Sapkota, Rupesh and Köhler, Dominik and Heindorf, Stefan}},
  booktitle    = {{Proceedings of the 33rd ACM International Conference on Information and Knowledge Management (CIKM ’24)}},
  location     = {{Boise, Idaho, USA}},
  publisher    = {{ACM}},
  title        = {{{EDGE: Evaluation Framework for Logical vs. Subgraph Explanations for Node Classifiers on Knowledge Graphs}}},
  doi          = {{10.1145/3627673.3679904}},
  year         = {{2024}},
}

@inproceedings{57240,
  abstract     = {{Validating assertions before adding them to a knowledge graph is an essential part of its creation and maintenance. Due to the sheer size of knowledge graphs, automatic fact-checking approaches have been developed. These approaches rely on reference knowledge to decide whether a given assertion is correct. Recent hybrid approaches achieve good results by including several knowledge sources. However, it is often impractical to provide a sheer quantity of textual knowledge or generate embedding models to leverage these hybrid approaches. We present FaVEL, an approach that uses algorithm selection and ensemble learning to amalgamate several existing fact-checking approaches that rely solely on a reference knowledge graph and, hence, use fewer resources than current hybrid approaches. For our evaluation, we create updated versions of two existing datasets and a new dataset dubbed FaVEL-DS. Our evaluation compares our approach to 15 fact-checking approaches—including the state-of-the-art approach HybridFC—on 3 datasets. Our results demonstrate that FaVEL outperforms all other approaches significantly by at least 0.04 in terms of the area under the ROC curve. Our source code, datasets, and evaluation results are open-source and can be found at https://github.com/dice-group/favel.}},
  author       = {{Qudus, Umair and Röder, Michael and Tatkeu Pekarou, Franck Lionel and Morim da Silva, Ana Alexandra and Ngonga Ngomo, Axel-Cyrille}},
  booktitle    = {{EKAW 2024}},
  editor       = {{Rospocher, Marco}},
  keywords     = {{fact checking, ensemble learning, transfer learning, knowledge management}},
  location     = {{Amsterdam, Netherlands}},
  title        = {{{FaVEL: Fact Validation Ensemble Learning}}},
  year         = {{2024}},
}

@inbook{61210,
  abstract     = {{Knowledge graphs (KGs) differ significantly over multiple different versions of the same data source. They also often contain blank nodes that do not have a constant identifier over all versions. Linking such blank nodes from different versions is a challenging task. Previous works propose different approaches to create signatures for all blank nodes based on named nodes in their neighborhood to match blank nodes with similar signatures. However, these works struggle to find a good mapping when the difference between the KGs’ versions grows too large. In this work, we propose Blink, an embedding-based approach for blank node linking. Blink merges two KGs’ versions and embeds the merged graph into a latent vector space based on translational embeddings and subsequently matches the closest pairs of blank nodes from different graphs. We evaluate our approach using real-world datasets against state-of-the-art approaches by computing the blank node matching for isomorphic graphs and graphs that contain triple changes (i.e., added or removed triples). The results indicate that Blink achieves perfect accuracy for isomorphic graphs. For graph versions that contain changes, such as having up to 20% of triples removed in one version, Blink still produces a mapping with an Optimal Mapping Deviation Ratio of under 1%. These results show that Blink leads to a better linking of KGs over different versions and similar graphs adhering to the linked data guidelines.}},
  author       = {{Becker, Alexander and Sherif, Mohamed and Ngonga Ngomo, Axel-Cyrille}},
  booktitle    = {{Lecture Notes in Computer Science}},
  isbn         = {{9783031778438}},
  issn         = {{0302-9743}},
  location     = {{Baltimore, USA}},
  publisher    = {{Springer Nature Switzerland}},
  title        = {{{Blink: Blank Node Matching Using Embeddings}}},
  doi          = {{10.1007/978-3-031-77844-5_12}},
  year         = {{2024}},
}

@inproceedings{46248,
  author       = {{Demir, Caglar and Wiebesiek, Michel and Lu, Renzhong and Ngonga Ngomo, Axel-Cyrille and Heindorf, Stefan}},
  booktitle    = {{ECML PKDD}},
  location     = {{Torino}},
  title        = {{{LitCQD: Multi-Hop Reasoning in Incomplete Knowledge Graphs with Numeric Literals}}},
  year         = {{2023}},
}

@inproceedings{33734,
  abstract     = {{Many applications require explainable node classification in knowledge graphs. Towards this end, a popular ``white-box'' approach is class expression learning: Given sets of positive and negative nodes, class expressions in description logics are learned that separate positive from negative nodes. Most existing approaches are search-based approaches generating many candidate class expressions and selecting the best one. However, they often take a long time to find suitable class expressions. In this paper, we cast class expression learning as a translation problem and propose a new family of class expression learning approaches which we dub neural class expression synthesizers. Training examples are ``translated'' into class expressions in a fashion akin to machine translation. Consequently, our synthesizers are not subject to the runtime limitations of search-based approaches. We study three instances of this novel family of approaches based on LSTMs, GRUs, and set transformers, respectively. An evaluation of our approach on four benchmark datasets suggests that it can effectively synthesize high-quality class expressions with respect to the input examples in approximately one second on average. Moreover, a comparison to state-of-the-art approaches suggests that we achieve better F-measures on large datasets. For reproducibility purposes, we provide our implementation as well as pretrained models in our public GitHub repository at https://github.com/dice-group/NeuralClassExpressionSynthesis}},
  author       = {{Kouagou, N'Dah Jean and Heindorf, Stefan and Demir, Caglar and Ngonga Ngomo, Axel-Cyrille}},
  booktitle    = {{The Semantic Web - 20th Extended Semantic Web Conference (ESWC 2023)}},
  editor       = {{Pesquita, Catia and Jimenez-Ruiz, Ernesto and McCusker, Jamie and Faria, Daniel and Dragoni, Mauro and Dimou, Anastasia and Troncy, Raphael and Hertling, Sven}},
  keywords     = {{Neural network, Concept learning, Description logics}},
  location     = {{Hersonissos, Crete, Greece}},
  pages        = {{209 -- 226}},
  publisher    = {{Springer International Publishing}},
  title        = {{{Neural Class Expression Synthesis}}},
  doi          = {{10.1007/978-3-031-33455-9_13}},
  volume       = {{13870}},
  year         = {{2023}},
}

@inproceedings{46243,
  author       = {{Demir, Caglar and Ngonga Ngomo, Axel-Cyrille}},
  booktitle    = {{ECML PKDD}},
  location     = {{Torino}},
  title        = {{{Clifford Embeddings – A Generalized Approach for Embedding in Normed Algebras}}},
  year         = {{2023}},
}

@inproceedings{46251,
  author       = {{Demir, Caglar and Ngonga Ngomo, Axel-Cyrille}},
  booktitle    = {{International Joint Conference on Artificial Intelligence}},
  location     = {{Macau}},
  title        = {{{Neuro-Symbolic Class Expression Learning}}},
  year         = {{2023}},
}

