@inproceedings{61041,
  abstract     = {{Large Language Models (LLMs) are increasingly deployed in real-world applications that require access to up-to-date knowledge. However, retraining LLMs is computationally expensive. Therefore, knowledge editing techniques are crucial for maintaining current information and correcting erroneous assertions within pre-trained models. Current benchmarks for knowledge editing primarily focus on recalling edited facts, often neglecting their logical consequences. To address this limitation, we introduce a new benchmark designed to evaluate how knowledge editing methods handle the logical consequences of a single fact edit. Our benchmark extracts relevant logical rules from a knowledge graph for a given edit. Then, it generates multi-hop questions based on these rules to assess the impact on logical consequences. Our findings indicate that while existing knowledge editing approaches can accurately insert direct assertions into LLMs, they frequently fail to inject entailed knowledge. Specifically, experiments with popular methods like ROME and FT reveal a substantial performance gap, up to 24%, between evaluations on directly edited knowledge and on entailed knowledge. This highlights the critical need for semantics-aware evaluation frameworks in knowledge editing.}},
  author       = {{Moteu Ngoli, Tatiana and Kouagou, N'Dah Jean and Zahera, Hamada Mohamed Abdelsamee and Ngonga Ngomo, Axel-Cyrille}},
  booktitle    = {{Proceedings of the 24th International Semantic Web Conference (ISWC 2025)}},
  isbn         = {{978-3-032-09530-5}},
  keywords     = {{dice sailproject moteu kouagou zahera ngonga}},
  location     = {{Nara, Japan}},
  pages        = {{41--56}},
  publisher    = {{Springer, Cham}},
  title        = {{{Benchmarking Knowledge Editing using Logical Rules}}},
  doi          = {{10.1007/978-3-032-09530-5_3}},
  year         = {{2025}},
}

@inproceedings{61753,
  abstract     = {{This paper presents LOLA, a massively multilingual large language model trained on more than 160 languages using a sparse Mixture-of-Experts Transformer architecture. Our architectural and implementation choices address the challenge of harnessing linguistic diversity while maintaining efficiency and avoiding the common pitfalls of multilinguality. Our analysis of the evaluation results shows competitive performance in natural language generation and understanding tasks. Additionally, we demonstrate how the learned expert-routing mechanism exploits implicit phylogenetic linguistic patterns to potentially alleviate the curse of multilinguality. We provide an in-depth look at the training process, an analysis of the datasets, and a balanced exploration of the model's strengths and limitations. As an open-source model, LOLA promotes reproducibility and serves as a robust foundation for future research. Our findings enable the development of compute-efficient multilingual models with strong, scalable performance across languages.}},
  author       = {{Srivastava, Nikit and Kuchelev, Denis and Moteu Ngoli, Tatiana and Shetty, Kshitij and Röder, Michael and Zahera, Hamada Mohamed Abdelsamee and Moussallem, Diego and Ngonga Ngomo, Axel-Cyrille}},
  booktitle    = {{Proceedings of the 31st International Conference on Computational Linguistics}},
  editor       = {{Rambow, Owen and Wanner, Leo and Apidianaki, Marianna and Al-Khalifa, Hend and Eugenio, Barbara Di and Schockaert, Steven}},
  pages        = {{6420--6446}},
  publisher    = {{Association for Computational Linguistics}},
  title        = {{{LOLA -- An Open-Source Massively Multilingual Large Language Model}}},
  year         = {{2025}},
}

@inproceedings{57278,
  author       = {{Morim da Silva, Ana Alexandra and Srivastava, Nikit and Moteu Ngoli, Tatiana and Röder, Michael and Moussallem, Diego and Ngonga Ngomo, Axel-Cyrille}},
  booktitle    = {{Proceedings of the Seventh Workshop on Technologies for Machine Translation of Low-Resource Languages (LoResMT 2024)}},
  doi          = {{10.18653/v1/2024.loresmt-1.18}},
  publisher    = {{Association for Computational Linguistics}},
  title        = {{{Benchmarking Low-Resource Machine Translation Systems}}},
  year         = {{2024}},
}

