[{"type":"conference","status":"public","user_id":"99174","department":[{"_id":"574"}],"project":[{"_id":"285","name":"SAIL - Nachhaltiger Lebenszyklus von intelligenten soziotechnischen Systemen"}],"_id":"61041","publication_status":"published","publication_identifier":{"isbn":["978-3-032-09530-5"]},"citation":{"chicago":"Moteu Ngoli, Tatiana, N’Dah Jean Kouagou, Hamada Mohamed Abdelsamee Zahera, and Axel-Cyrille Ngonga Ngomo. “Benchmarking Knowledge Editing Using Logical Rules.” In <i>Proceedings of the 24th International Semantic Web Conference (ISWC 2025)</i>, 41-56. Springer, Cham, 2025. <a href=\"https://doi.org/10.1007/978-3-032-09530-5_3\">https://doi.org/10.1007/978-3-032-09530-5_3</a>.","ieee":"T. Moteu Ngoli, N. J. Kouagou, H. M. A. Zahera, and A.-C. Ngonga Ngomo, “Benchmarking Knowledge Editing using Logical Rules,” in <i>Proceedings of the 24th International Semantic Web Conference (ISWC 2025)</i>, Nara, Japan, 2025, pp. 41-56, doi: <a href=\"https://doi.org/10.1007/978-3-032-09530-5_3\">https://doi.org/10.1007/978-3-032-09530-5_3</a>.","ama":"Moteu Ngoli T, Kouagou NJ, Zahera HMA, Ngonga Ngomo A-C. Benchmarking Knowledge Editing using Logical Rules. In: <i>Proceedings of the 24th International Semantic Web Conference (ISWC 2025)</i>. Springer, Cham; 2025:41-56. doi:<a href=\"https://doi.org/10.1007/978-3-032-09530-5_3\">https://doi.org/10.1007/978-3-032-09530-5_3</a>","mla":"Moteu Ngoli, Tatiana, et al. “Benchmarking Knowledge Editing Using Logical Rules.” <i>Proceedings of the 24th International Semantic Web Conference (ISWC 2025)</i>, Springer, Cham, 2025, pp. 41-56, doi:<a href=\"https://doi.org/10.1007/978-3-032-09530-5_3\">https://doi.org/10.1007/978-3-032-09530-5_3</a>.","short":"T. Moteu Ngoli, N.J. Kouagou, H.M.A. Zahera, A.-C. Ngonga Ngomo, in: Proceedings of the 24th International Semantic Web Conference (ISWC 2025), Springer, Cham, 2025, p. 
41-56.","bibtex":"@inproceedings{Moteu Ngoli_Kouagou_Zahera_Ngonga Ngomo_2025, title={Benchmarking Knowledge Editing using Logical Rules}, DOI={<a href=\"https://doi.org/10.1007/978-3-032-09530-5_3\">https://doi.org/10.1007/978-3-032-09530-5_3</a>}, booktitle={Proceedings of the 24th International Semantic Web Conference (ISWC 2025)}, publisher={Springer, Cham}, author={Moteu Ngoli, Tatiana and Kouagou, N’Dah Jean and Zahera, Hamada Mohamed Abdelsamee and Ngonga Ngomo, Axel-Cyrille}, year={2025}, pages={41-56} }","apa":"Moteu Ngoli, T., Kouagou, N. J., Zahera, H. M. A., &#38; Ngonga Ngomo, A.-C. (2025). Benchmarking Knowledge Editing using Logical Rules. <i>Proceedings of the 24th International Semantic Web Conference (ISWC 2025)</i>, 41-56. <a href=\"https://doi.org/10.1007/978-3-032-09530-5_3\">https://doi.org/10.1007/978-3-032-09530-5_3</a>"},"page":"41-56","author":[{"last_name":"Moteu Ngoli","full_name":"Moteu Ngoli, Tatiana","id":"99174","first_name":"Tatiana"},{"last_name":"Kouagou","id":"87189","full_name":"Kouagou, N'Dah Jean","first_name":"N'Dah Jean"},{"first_name":"Hamada Mohamed Abdelsamee","full_name":"Zahera, Hamada Mohamed Abdelsamee","id":"72768","orcid":"0000-0003-0215-1278","last_name":"Zahera"},{"last_name":"Ngonga Ngomo","full_name":"Ngonga Ngomo, Axel-Cyrille","id":"65716","first_name":"Axel-Cyrille"}],"date_updated":"2025-12-01T10:04:25Z","doi":"https://doi.org/10.1007/978-3-032-09530-5_3","conference":{"end_date":"2025-11-06","location":"Nara, Japan","name":"The 24th International Semantic Web Conference (ISWC 2025)","start_date":"2025-11-02"},"publication":"Proceedings of the 24th International Semantic Web Conference (ISWC 2025)","abstract":[{"lang":"eng","text":"Large Language Models (LLMs) are increasingly deployed in real-world applications that require access to up-to-date knowledge. However, retraining LLMs is computationally expensive. 
Therefore, knowledge editing techniques are crucial for maintaining current information and correcting erroneous assertions within pre-trained models. Current benchmarks for knowledge editing primarily focus on recalling edited facts, often neglecting their logical consequences. To address this limitation, we introduce a new benchmark designed to evaluate how knowledge editing methods handle the logical consequences of a single fact edit. Our benchmark extracts relevant logical rules from a knowledge graph for a given edit. Then, it generates multi-hop questions based on these rules to assess the impact on logical consequences. Our findings indicate that while existing knowledge editing approaches can accurately insert direct assertions into LLMs, they frequently fail to inject entailed knowledge. Specifically, experiments with popular methods like ROME and FT reveal a substantial performance gap, up to 24%, between evaluations on directly edited knowledge and on entailed knowledge. This highlights the critical need for semantics-aware evaluation frameworks in knowledge editing."}],"language":[{"iso":"eng"}],"keyword":["dice sailproject moteu kouagou zahera ngonga"],"year":"2025","date_created":"2025-08-27T13:17:55Z","publisher":"Springer, Cham","title":"Benchmarking Knowledge Editing using Logical Rules"},{"type":"conference","publication":"Proceedings of the 31st International Conference on Computational Linguistics","status":"public","abstract":[{"lang":"eng","text":"This paper presents LOLA, a massively multilingual large language model trained on more than 160 languages using a sparse Mixture-of-Experts Transformer architecture. Our architectural and implementation choices address the challenge of harnessing linguistic diversity while maintaining efficiency and avoiding the common pitfalls of multilinguality. Our analysis of the evaluation results shows competitive performance in natural language generation and understanding tasks. 
Additionally, we demonstrate how the learned expert-routing mechanism exploits implicit phylogenetic linguistic patterns to potentially alleviate the curse of multilinguality. We provide an in-depth look at the training process, an analysis of the datasets, and a balanced exploration of the model’s strengths and limitations. As an open-source model, LOLA promotes reproducibility and serves as a robust foundation for future research. Our findings enable the development of compute-efficient multilingual models with strong, scalable performance across languages."}],"editor":[{"full_name":"Rambow, Owen","last_name":"Rambow","first_name":"Owen"},{"first_name":"Leo","last_name":"Wanner","full_name":"Wanner, Leo"},{"first_name":"Marianna","full_name":"Apidianaki, Marianna","last_name":"Apidianaki"},{"first_name":"Hend","full_name":"Al-Khalifa, Hend","last_name":"Al-Khalifa"},{"first_name":"Barbara Di","last_name":"Eugenio","full_name":"Eugenio, Barbara Di"},{"first_name":"Steven","full_name":"Schockaert, Steven","last_name":"Schockaert"}],"user_id":"70066","_id":"61753","language":[{"iso":"eng"}],"citation":{"ama":"Srivastava N, Kuchelev D, Moteu Ngoli T, et al. LOLA – An Open-Source Massively Multilingual Large Language Model. In: Rambow O, Wanner L, Apidianaki M, Al-Khalifa H, Eugenio BD, Schockaert S, eds. <i>Proceedings of the 31st International Conference on Computational Linguistics</i>. Association for Computational Linguistics; 2025:6420–6446.","ieee":"N. Srivastava <i>et al.</i>, “LOLA – An Open-Source Massively Multilingual Large Language Model,” in <i>Proceedings of the 31st International Conference on Computational Linguistics</i>, 2025, pp. 6420–6446.","chicago":"Srivastava, Nikit, Denis Kuchelev, Tatiana Moteu Ngoli, Kshitij Shetty, Michael Röder, Hamada Mohamed Abdelsamee Zahera, Diego Moussallem, and Axel-Cyrille Ngonga Ngomo. 
“LOLA – An Open-Source Massively Multilingual Large Language Model.” In <i>Proceedings of the 31st International Conference on Computational Linguistics</i>, edited by Owen Rambow, Leo Wanner, Marianna Apidianaki, Hend Al-Khalifa, Barbara Di Eugenio, and Steven Schockaert, 6420–6446. Abu Dhabi, UAE: Association for Computational Linguistics, 2025.","apa":"Srivastava, N., Kuchelev, D., Moteu Ngoli, T., Shetty, K., Röder, M., Zahera, H. M. A., Moussallem, D., &#38; Ngonga Ngomo, A.-C. (2025). LOLA – An Open-Source Massively Multilingual Large Language Model. In O. Rambow, L. Wanner, M. Apidianaki, H. Al-Khalifa, B. D. Eugenio, &#38; S. Schockaert (Eds.), <i>Proceedings of the 31st International Conference on Computational Linguistics</i> (pp. 6420–6446). Association for Computational Linguistics.","mla":"Srivastava, Nikit, et al. “LOLA – An Open-Source Massively Multilingual Large Language Model.” <i>Proceedings of the 31st International Conference on Computational Linguistics</i>, edited by Owen Rambow et al., Association for Computational Linguistics, 2025, pp. 6420–6446.","bibtex":"@inproceedings{Srivastava_Kuchelev_Moteu Ngoli_Shetty_Röder_Zahera_Moussallem_Ngonga Ngomo_2025, place={Abu Dhabi, UAE}, title={LOLA – An Open-Source Massively Multilingual Large Language Model}, booktitle={Proceedings of the 31st International Conference on Computational Linguistics}, publisher={Association for Computational Linguistics}, author={Srivastava, Nikit and Kuchelev, Denis and Moteu Ngoli, Tatiana and Shetty, Kshitij and Röder, Michael and Zahera, Hamada Mohamed Abdelsamee and Moussallem, Diego and Ngonga Ngomo, Axel-Cyrille}, editor={Rambow, Owen and Wanner, Leo and Apidianaki, Marianna and Al-Khalifa, Hend and Eugenio, Barbara Di and Schockaert, Steven}, year={2025}, pages={6420–6446} }","short":"N. Srivastava, D. Kuchelev, T. Moteu Ngoli, K. Shetty, M. Röder, H.M.A. Zahera, D. Moussallem, A.-C. Ngonga Ngomo, in: O. Rambow, L. Wanner, M. Apidianaki, H. Al-Khalifa, B.D. 
Eugenio, S. Schockaert (Eds.), Proceedings of the 31st International Conference on Computational Linguistics, Association for Computational Linguistics, Abu Dhabi, UAE, 2025, pp. 6420–6446."},"page":"6420–6446","place":"Abu Dhabi, UAE","year":"2025","date_created":"2025-10-08T11:02:30Z","author":[{"first_name":"Nikit","id":"70066","full_name":"Srivastava, Nikit","last_name":"Srivastava","orcid":"0009-0004-5164-4911"},{"first_name":"Denis","last_name":"Kuchelev","full_name":"Kuchelev, Denis","id":"70842"},{"first_name":"Tatiana","full_name":"Moteu Ngoli, Tatiana","id":"99174","last_name":"Moteu Ngoli"},{"first_name":"Kshitij","last_name":"Shetty","full_name":"Shetty, Kshitij"},{"first_name":"Michael","orcid":"https://orcid.org/0000-0002-8609-8277","last_name":"Röder","id":"67199","full_name":"Röder, Michael"},{"first_name":"Hamada Mohamed Abdelsamee","id":"72768","full_name":"Zahera, Hamada Mohamed Abdelsamee","last_name":"Zahera","orcid":"0000-0003-0215-1278"},{"first_name":"Diego","id":"71635","full_name":"Moussallem, Diego","last_name":"Moussallem"},{"first_name":"Axel-Cyrille","id":"65716","full_name":"Ngonga Ngomo, Axel-Cyrille","last_name":"Ngonga Ngomo"}],"date_updated":"2026-01-06T10:11:37Z","publisher":"Association for Computational Linguistics","oa":"1","main_file_link":[{"open_access":"1","url":"https://aclanthology.org/2025.coling-main.428.pdf"}],"title":"LOLA – An Open-Source Massively Multilingual Large Language Model"},{"year":"2024","citation":{"ieee":"A. A. Morim da Silva, N. Srivastava, T. Moteu Ngoli, M. Röder, D. Moussallem, and A.-C. Ngonga Ngomo, “Benchmarking Low-Resource Machine Translation Systems,” 2024, doi: <a href=\"https://doi.org/10.18653/v1/2024.loresmt-1.18\">10.18653/v1/2024.loresmt-1.18</a>.","chicago":"Morim da Silva, Ana Alexandra, Nikit Srivastava, Tatiana Moteu Ngoli, Michael Röder, Diego Moussallem, and Axel-Cyrille Ngonga Ngomo. 
“Benchmarking Low-Resource Machine Translation Systems.” In <i>Proceedings of the Seventh Workshop on Technologies for Machine Translation of Low-Resource Languages (LoResMT 2024)</i>. Association for Computational Linguistics, 2024. <a href=\"https://doi.org/10.18653/v1/2024.loresmt-1.18\">https://doi.org/10.18653/v1/2024.loresmt-1.18</a>.","ama":"Morim da Silva AA, Srivastava N, Moteu Ngoli T, Röder M, Moussallem D, Ngonga Ngomo A-C. Benchmarking Low-Resource Machine Translation Systems. In: <i>Proceedings of the Seventh Workshop on Technologies for Machine Translation of Low-Resource Languages (LoResMT 2024)</i>. Association for Computational Linguistics; 2024. doi:<a href=\"https://doi.org/10.18653/v1/2024.loresmt-1.18\">10.18653/v1/2024.loresmt-1.18</a>","bibtex":"@inproceedings{Morim da Silva_Srivastava_Moteu Ngoli_Röder_Moussallem_Ngonga Ngomo_2024, title={Benchmarking Low-Resource Machine Translation Systems}, DOI={<a href=\"https://doi.org/10.18653/v1/2024.loresmt-1.18\">10.18653/v1/2024.loresmt-1.18</a>}, booktitle={Proceedings of the Seventh Workshop on Technologies for Machine Translation of Low-Resource Languages (LoResMT 2024)}, publisher={Association for Computational Linguistics}, author={Morim da Silva, Ana Alexandra and Srivastava, Nikit and Moteu Ngoli, Tatiana and Röder, Michael and Moussallem, Diego and Ngonga Ngomo, Axel-Cyrille}, year={2024} }","mla":"Morim da Silva, Ana Alexandra, et al. “Benchmarking Low-Resource Machine Translation Systems.” <i>Proceedings of the Seventh Workshop on Technologies for Machine Translation of Low-Resource Languages (LoResMT 2024)</i>, Association for Computational Linguistics, 2024, doi:<a href=\"https://doi.org/10.18653/v1/2024.loresmt-1.18\">10.18653/v1/2024.loresmt-1.18</a>.","short":"A.A. Morim da Silva, N. Srivastava, T. Moteu Ngoli, M. Röder, D. Moussallem, A.-C. 
Ngonga Ngomo, in: Proceedings of the Seventh Workshop on Technologies for Machine Translation of Low-Resource Languages (LoResMT 2024), Association for Computational Linguistics, 2024.","apa":"Morim da Silva, A. A., Srivastava, N., Moteu Ngoli, T., Röder, M., Moussallem, D., &#38; Ngonga Ngomo, A.-C. (2024). Benchmarking Low-Resource Machine Translation Systems. <i>Proceedings of the Seventh Workshop on Technologies for Machine Translation of Low-Resource Languages (LoResMT 2024)</i>. <a href=\"https://doi.org/10.18653/v1/2024.loresmt-1.18\">https://doi.org/10.18653/v1/2024.loresmt-1.18</a>"},"publication_status":"published","title":"Benchmarking Low-Resource Machine Translation Systems","doi":"10.18653/v1/2024.loresmt-1.18","date_updated":"2025-12-02T19:29:53Z","publisher":"Association for Computational Linguistics","author":[{"first_name":"Ana Alexandra","last_name":"Morim da Silva","id":"72108","full_name":"Morim da Silva, Ana Alexandra"},{"id":"70066","full_name":"Srivastava, Nikit","last_name":"Srivastava","orcid":"0009-0004-5164-4911","first_name":"Nikit"},{"last_name":"Moteu Ngoli","full_name":"Moteu Ngoli, Tatiana","id":"99174","first_name":"Tatiana"},{"orcid":"https://orcid.org/0000-0002-8609-8277","last_name":"Röder","full_name":"Röder, Michael","id":"67199","first_name":"Michael"},{"last_name":"Moussallem","full_name":"Moussallem, Diego","id":"71635","first_name":"Diego"},{"id":"65716","full_name":"Ngonga Ngomo, Axel-Cyrille","last_name":"Ngonga Ngomo","first_name":"Axel-Cyrille"}],"date_created":"2024-11-20T10:41:16Z","status":"public","type":"conference","publication":"Proceedings of the Seventh Workshop on Technologies for Machine Translation of Low-Resource Languages (LoResMT 2024)","language":[{"iso":"eng"}],"_id":"57278","user_id":"70066"}]
