@inproceedings{63572,
  author       = {{Demir, Caglar and Yekini, Moshood Olawale and Röder, Michael and Mahmood, Yasir and Ngonga Ngomo, Axel-Cyrille}},
  booktitle    = {{Lecture Notes in Computer Science}},
  isbn         = {{9783032060655}},
  issn         = {{0302-9743}},
  location     = {{Porto}},
  publisher    = {{Springer Nature Switzerland}},
  title        = {{{Tree-Based OWL Class Expression Learner over Large Graphs}}},
  doi          = {{10.1007/978-3-032-06066-2_29}},
  year         = {{2025}},
}

@inproceedings{63575,
  author       = {{Kapoor, Sourabh and Sharma, Arnab and Röder, Michael and Demir, Caglar and Ngonga Ngomo, Axel-Cyrille}},
  booktitle    = {{Lecture Notes in Computer Science}},
  isbn         = {{9783031945748}},
  issn         = {{0302-9743}},
  publisher    = {{Springer Nature Switzerland}},
  title        = {{{Robustness Evaluation of Knowledge Graph Embedding Models Under Non-targeted Attacks}}},
  doi          = {{10.1007/978-3-031-94575-5_15}},
  year         = {{2025}},
}

@inproceedings{63573,
  author       = {{Memariani, Adel and Röder, Michael and Sharma, Arnab and Demir, Caglar and Ngonga Ngomo, Axel-Cyrille}},
  booktitle    = {{Lecture Notes in Computer Science}},
  isbn         = {{9783032095268}},
  issn         = {{0302-9743}},
  publisher    = {{Springer Nature Switzerland}},
  title        = {{{Link Prediction Under Non-targeted Attacks: Do Soft Labels Always Help?}}},
  doi          = {{10.1007/978-3-032-09527-5_6}},
  year         = {{2025}},
}

@inproceedings{54449,
  author       = {{Kouagou, N'Dah Jean and Demir, Caglar and Zahera, Hamada Mohamed Abdelsamee and Wilke, Adrian and Heindorf, Stefan and Li, Jiayi and Ngonga Ngomo, Axel-Cyrille}},
  booktitle    = {{Companion Proceedings of the ACM on Web Conference 2024}},
  location     = {{Singapore}},
  publisher    = {{ACM}},
  title        = {{{Universal Knowledge Graph Embeddings}}},
  doi          = {{10.1145/3589335.3651978}},
  year         = {{2024}},
}

@article{58049,
  abstract     = {{In recent years, knowledge graph embedding models have been successfully applied in the transductive setting to tackle various challenging tasks, including link prediction and query answering. Yet, the transductive setting does not allow for reasoning over unseen entities and relations, let alone numerical or non-numerical literals. Although increasing efforts are put into exploring inductive scenarios, inference over unseen entities, relations, and literals remains out of reach. This limitation prohibits existing methods from handling real-world dynamic knowledge graphs involving heterogeneous information about the world. Here, we propose a remedy to this limitation. We propose the attentive byte-pair encoding layer (BytE) to construct a triple embedding from a sequence of byte-pair encoded subword units of entities and relations. Compared to the conventional setting, BytE leads to massive feature reuse via weight tying, since it forces a knowledge graph embedding model to learn embeddings for subword units instead of entities and relations directly. Consequently, the sizes of the embedding matrices are no longer bound to the number of unique entities and relations of a knowledge graph. Experimental results show that BytE improves the link prediction performance of four knowledge graph embedding models on datasets where the syntactic representations of triples are semantically meaningful. However, the benefits of training a knowledge graph embedding model with BytE dissipate on knowledge graphs where entities and relations are represented with plain numbers or URIs. We provide an open-source implementation of BytE to foster reproducible research.}},
  author       = {{Demir, Caglar and Kouagou, N'Dah Jean and Sharma, Arnab and Ngonga Ngomo, Axel-Cyrille}},
  journal      = {{arXiv}},
  title        = {{{Inference over Unseen Entities, Relations and Literals on Knowledge Graphs}}},
  doi          = {{10.48550/ARXIV.2410.06742}},
  year         = {{2024}},
}

@article{58051,
  author       = {{Demir, Caglar and Sharma, Arnab and Ngonga Ngomo, Axel-Cyrille}},
  journal      = {{arXiv}},
  title        = {{{Adaptive Stochastic Weight Averaging}}},
  doi          = {{10.48550/ARXIV.2406.19092}},
  year         = {{2024}},
}

@inproceedings{55653,
  abstract     = {{We consider the problem of class expression learning using cardinality-minimal sets of examples. Recent class expression learning approaches employ deep neural networks and have demonstrated tremendous performance improvements in execution time and quality of the computed solutions. However, they lack generalization capabilities when it comes to the number of examples used in a learning problem, i.e., they often perform poorly on unseen learning problems where only a few examples are given. In this work, we propose a generalization of the classical class expression learning problem to address the limitations above. In short, our generalized learning problem (GLP) forces learning systems to solve the classical class expression learning problem using the smallest possible subsets of examples, thereby improving the learning systems' ability to solve unseen learning problems with arbitrary numbers of examples. Moreover, we develop ROCES, a learning algorithm for synthesis-based approaches to solve GLP. Experimental results suggest that, after training, ROCES outperforms existing synthesis-based approaches on out-of-distribution learning problems while remaining highly competitive overall.}},
  author       = {{Kouagou, N'Dah Jean and Heindorf, Stefan and Demir, Caglar and Ngonga Ngomo, Axel-Cyrille}},
  booktitle    = {{Proceedings of the Thirty-Third International Joint Conference on Artificial Intelligence}},
  publisher    = {{International Joint Conferences on Artificial Intelligence Organization}},
  title        = {{{ROCES: Robust Class Expression Synthesis in Description Logics via Iterative Sampling}}},
  doi          = {{10.24963/ijcai.2024/479}},
  year         = {{2024}},
}

@inbook{46460,
  author       = {{Ngonga Ngomo, Axel-Cyrille and Demir, Caglar and Kouagou, N'Dah Jean and Heindorf, Stefan and Karalis, Nikolaos and Bigerl, Alexander}},
  booktitle    = {{Compendium of Neurosymbolic Artificial Intelligence}},
  pages        = {{272--286}},
  publisher    = {{IOS Press}},
  title        = {{{Class Expression Learning with Multiple Representations}}},
  year         = {{2023}},
}

@inproceedings{46248,
  author       = {{Demir, Caglar and Wiebesiek, Michel and Lu, Renzhong and Ngonga Ngomo, Axel-Cyrille and Heindorf, Stefan}},
  booktitle    = {{ECML PKDD}},
  location     = {{Turin}},
  title        = {{{LitCQD: Multi-Hop Reasoning in Incomplete Knowledge Graphs with Numeric Literals}}},
  year         = {{2023}},
}

@inbook{47421,
  abstract     = {{Class expression learning in description logics has long been regarded as an iterative search problem in an infinite conceptual space. Each iteration of the search process invokes a reasoner and a heuristic function. The reasoner finds the instances of the current expression, and the heuristic function computes the information gain and decides on the next step to be taken. As the size of the background knowledge base grows, search-based approaches for class expression learning become prohibitively slow. Current neural class expression synthesis (NCES) approaches investigate the use of neural networks for class expression learning in the attributive language with complement (ALC). While they show significant improvements over search-based approaches in runtime and quality of the computed solutions, they rely on the availability of pretrained embeddings for the input knowledge base. Moreover, they are not applicable to ontologies in more expressive description logics. In this paper, we propose a novel NCES approach which extends the state of the art to the description logic ALCHIQ(D). Our extension, dubbed NCES2, comes with an improved training data generator and does not require pretrained embeddings for the input knowledge base as both the embedding model and the class expression synthesizer are trained jointly. Empirical results on benchmark datasets suggest that our approach inherits the scalability capability of current NCES instances with the additional advantage that it supports more complex learning problems. NCES2 achieves the highest performance overall when compared to search-based approaches and to its predecessor NCES. We provide our source code, datasets, and pretrained models at https://github.com/dice-group/NCES2.}},
  author       = {{Kouagou, N'Dah Jean and Heindorf, Stefan and Demir, Caglar and Ngonga Ngomo, Axel-Cyrille}},
  booktitle    = {{Machine Learning and Knowledge Discovery in Databases: Research Track}},
  isbn         = {{9783031434204}},
  issn         = {{0302-9743}},
  location     = {{Turin}},
  publisher    = {{Springer Nature Switzerland}},
  title        = {{{Neural Class Expression Synthesis in ALCHIQ(D)}}},
  doi          = {{10.1007/978-3-031-43421-1_12}},
  year         = {{2023}},
}

@inproceedings{54612,
  author       = {{Kouagou, N'Dah Jean and Heindorf, Stefan and Demir, Caglar and Ngonga Ngomo, Axel-Cyrille}},
  booktitle    = {{NeSy 2023, 17th International Workshop on Neural-Symbolic Learning and Reasoning}},
  location     = {{Certosa di Pontignano, Siena, Italy}},
  publisher    = {{CEUR-WS}},
  title        = {{{Neural Class Expression Synthesis (Extended Abstract)}}},
  year         = {{2023}},
}

@inproceedings{54615,
  author       = {{Demir, Caglar and Ngonga Ngomo, Axel-Cyrille}},
  booktitle    = {{Advances in Intelligent Data Analysis XXI: 21st International Symposium on Intelligent Data Analysis, IDA 2023, Louvain-la-Neuve, Belgium, April 12–14, 2023, Proceedings}},
  pages        = {{103--115}},
  title        = {{{Learning Permutation-Invariant Embeddings for Description Logic Concepts}}},
  year         = {{2023}},
}

@inproceedings{33734,
  abstract     = {{Many applications require explainable node classification in knowledge graphs. Towards this end, a popular ``white-box'' approach is class expression learning: Given sets of positive and negative nodes, class expressions in description logics are learned that separate positive from negative nodes. Most existing approaches are search-based approaches generating many candidate class expressions and selecting the best one. However, they often take a long time to find suitable class expressions. In this paper, we cast class expression learning as a translation problem and propose a new family of class expression learning approaches which we dub neural class expression synthesizers. Training examples are ``translated'' into class expressions in a fashion akin to machine translation. Consequently, our synthesizers are not subject to the runtime limitations of search-based approaches. We study three instances of this novel family of approaches based on LSTMs, GRUs, and set transformers, respectively. An evaluation of our approach on four benchmark datasets suggests that it can effectively synthesize high-quality class expressions with respect to the input examples in approximately one second on average. Moreover, a comparison to state-of-the-art approaches suggests that we achieve better F-measures on large datasets. For reproducibility purposes, we provide our implementation as well as pretrained models in our public GitHub repository at https://github.com/dice-group/NeuralClassExpressionSynthesis}},
  author       = {{Kouagou, N'Dah Jean and Heindorf, Stefan and Demir, Caglar and Ngonga Ngomo, Axel-Cyrille}},
  booktitle    = {{The Semantic Web - 20th Extended Semantic Web Conference (ESWC 2023)}},
  editor       = {{Pesquita, Catia and Jimenez-Ruiz, Ernesto and McCusker, Jamie and Faria, Daniel and Dragoni, Mauro and Dimou, Anastasia and Troncy, Raphael and Hertling, Sven}},
  keywords     = {{Neural network, Concept learning, Description logics}},
  location     = {{Hersonissos, Crete, Greece}},
  pages        = {{209--226}},
  publisher    = {{Springer International Publishing}},
  title        = {{{Neural Class Expression Synthesis}}},
  doi          = {{10.1007/978-3-031-33455-9_13}},
  volume       = {{13870}},
  year         = {{2023}},
}

@inproceedings{46243,
  author       = {{Demir, Caglar and Ngonga Ngomo, Axel-Cyrille}},
  booktitle    = {{ECML PKDD}},
  location     = {{Turin}},
  title        = {{{Clifford Embeddings – A Generalized Approach for Embedding in Normed Algebras}}},
  year         = {{2023}},
}

@inproceedings{46251,
  author       = {{Demir, Caglar and Ngonga Ngomo, Axel-Cyrille}},
  booktitle    = {{International Joint Conference on Artificial Intelligence}},
  location     = {{Macau}},
  title        = {{{Neuro-Symbolic Class Expression Learning}}},
  year         = {{2023}},
}

@unpublished{31545,
  abstract     = {{Knowledge graph embedding research has mainly focused on learning continuous representations of entities and relations tailored towards the link prediction problem. Recent results indicate an ever-increasing predictive ability of current approaches on benchmark datasets. However, this effectiveness often comes at the cost of over-parameterization and increased computational complexity. The former induces extensive hyperparameter optimization to mitigate malicious overfitting. The latter magnifies the importance of winning the hardware lottery. Here, we investigate a remedy for the first problem. We propose a technique based on Kronecker decomposition to reduce the number of parameters in a knowledge graph embedding model, while retaining its expressiveness. Through Kronecker decomposition, large embedding matrices are split into smaller embedding matrices during the training process. Hence, embeddings of knowledge graphs are not plainly retrieved but reconstructed on the fly. The decomposition ensures that elementwise interactions between three embedding vectors are extended with interactions within each embedding vector. This implicitly reduces redundancy in embedding vectors and encourages feature reuse. To quantify the impact of applying Kronecker decomposition on embedding matrices, we conduct a series of experiments on benchmark datasets. Our experiments suggest that applying Kronecker decomposition on embedding matrices leads to an improved parameter efficiency on all benchmark datasets. Moreover, empirical evidence suggests that reconstructed embeddings entail robustness against noise in the input knowledge graph. To foster reproducible research, we provide an open-source implementation of our approach, including training and evaluation scripts as well as pre-trained models in our knowledge graph embedding framework.}},
  author       = {{Demir, Caglar and Lienen, Julian and Ngonga Ngomo, Axel-Cyrille}},
  note         = {{arXiv:2205.06560}},
  title        = {{{Kronecker Decomposition for Knowledge Graph Embeddings}}},
  year         = {{2022}},
}

@unpublished{31546,
  abstract     = {{In semi-supervised learning, the paradigm of self-training refers to the idea of learning from pseudo-labels suggested by the learner itself. Across various domains, corresponding methods have proven effective and achieve state-of-the-art performance. However, pseudo-labels typically stem from ad-hoc heuristics, relying on the quality of the predictions, though without guaranteeing their validity. One such method, so-called credal self-supervised learning, maintains pseudo-supervision in the form of sets of (instead of single) probability distributions over labels, thereby allowing for a flexible yet uncertainty-aware labeling. Again, however, there is no justification beyond empirical effectiveness. To address this deficiency, we make use of conformal prediction, an approach that comes with guarantees on the validity of set-valued predictions. As a result, the construction of credal sets of labels is supported by a rigorous theoretical foundation, leading to better calibrated and less error-prone supervision for unlabeled data. Along with this, we present effective algorithms for learning from credal self-supervision. An empirical study demonstrates excellent calibration properties of the pseudo-supervision, as well as the competitiveness of our method on several benchmark datasets.}},
  author       = {{Lienen, Julian and Demir, Caglar and Hüllermeier, Eyke}},
  note         = {{arXiv:2205.15239}},
  title        = {{{Conformal Credal Self-Supervised Learning}}},
  year         = {{2022}},
}

@inbook{33740,
  author       = {{KOUAGOU, N'Dah Jean and Heindorf, Stefan and Demir, Caglar and Ngonga Ngomo, Axel-Cyrille}},
  booktitle    = {{The Semantic Web}},
  isbn         = {{9783031069802}},
  issn         = {{0302-9743}},
  publisher    = {{Springer International Publishing}},
  title        = {{{Learning Concept Lengths Accelerates Concept Learning in ALC}}},
  doi          = {{10.1007/978-3-031-06981-9_14}},
  year         = {{2022}},
}

@inproceedings{29290,
  abstract     = {{Classifying nodes in knowledge graphs is an important task, e.g., predicting missing types of entities, predicting which molecules cause cancer, or predicting which drugs are promising treatment candidates. While black-box models often achieve high predictive performance, they are only post-hoc and locally explainable and do not allow the learned model to be easily enriched with domain knowledge. Towards this end, learning description logic concepts from positive and negative examples has been proposed. However, learning such concepts often takes a long time and state-of-the-art approaches provide limited support for literal data values, although they are crucial for many applications. In this paper, we propose EvoLearner - an evolutionary approach to learn ALCQ(D), which is the attributive language with complement (ALC) paired with qualified cardinality restrictions (Q) and data properties (D). We contribute a novel initialization method for the initial population: starting from positive examples (nodes in the knowledge graph), we perform biased random walks and translate them to description logic concepts. Moreover, we improve support for data properties by maximizing information gain when deciding where to split the data. We show that our approach significantly outperforms the state of the art on the benchmarking framework SML-Bench for structured machine learning. Our ablation study confirms that this is due to our novel initialization method and support for data properties.}},
  author       = {{Heindorf, Stefan and Blübaum, Lukas and Düsterhus, Nick and Werner, Till and Golani, Varun Nandkumar and Demir, Caglar and Ngonga Ngomo, Axel-Cyrille}},
  booktitle    = {{Proceedings of the ACM Web Conference 2022 (WWW)}},
  pages        = {{818--828}},
  publisher    = {{ACM}},
  title        = {{{EvoLearner: Learning Description Logics with Evolutionary Algorithms}}},
  doi          = {{10.1145/3485447.3511925}},
  year         = {{2022}},
}

@inproceedings{25206,
  author       = {{Demir, Caglar and Ngonga Ngomo, Axel-Cyrille}},
  booktitle    = {{The Semantic Web - 18th International Conference, ESWC 2021, Virtual Event, June 6-10, 2021, Proceedings}},
  editor       = {{Verborgh, Ruben and Hose, Katja and Paulheim, Heiko and Champin, Pierre-Antoine and Maleshkova, Maria and Corcho, Oscar and Ristoski, Petar and Alam, Mehwish}},
  pages        = {{409--424}},
  publisher    = {{Springer}},
  title        = {{{Convolutional Complex Knowledge Graph Embeddings}}},
  doi          = {{10.1007/978-3-030-77385-4_24}},
  volume       = {{12731}},
  year         = {{2021}},
}

