[{"user_id":"44040","external_id":{"arxiv":["2305.00983"]},"_id":"44512","language":[{"iso":"eng"}],"type":"preprint","publication":"arXiv:2305.00983","status":"public","abstract":[{"text":"For open world applications, deep neural networks (DNNs) need to be aware of\r\npreviously unseen data and adaptable to evolving environments. Furthermore, it\r\nis desirable to detect and learn novel classes which are not included in the\r\nDNNs underlying set of semantic classes in an unsupervised fashion. The method\r\nproposed in this article builds upon anomaly detection to retrieve\r\nout-of-distribution (OoD) data as candidates for new classes. We thereafter\r\nextend the DNN by $k$ empty classes and fine-tune it on the OoD data samples.\r\nTo this end, we introduce two loss functions, which 1) entice the DNN to assign\r\nOoD samples to the empty classes and 2) to minimize the inner-class feature\r\ndistances between them. Thus, instead of ground truth which contains labels for\r\nthe different novel classes, the DNN obtains a single OoD label together with a\r\ndistance matrix, which is computed in advance. We perform several experiments\r\nfor image classification and semantic segmentation, which demonstrate that a\r\nDNN can extend its own semantic space by multiple classes without having access\r\nto ground truth.","lang":"eng"}],"date_created":"2023-05-05T11:37:00Z","author":[{"first_name":"Svenja","last_name":"Uhlemeyer","full_name":"Uhlemeyer, Svenja"},{"id":"44040","full_name":"Lienen, Julian","last_name":"Lienen","first_name":"Julian"},{"first_name":"Eyke","full_name":"Hüllermeier, Eyke","id":"48129","last_name":"Hüllermeier"},{"first_name":"Hanno","full_name":"Gottschalk, Hanno","last_name":"Gottschalk"}],"date_updated":"2023-05-05T11:39:10Z","oa":"1","main_file_link":[{"open_access":"1","url":"https://arxiv.org/pdf/2305.00983.pdf"}],"title":"Detecting Novelties with Empty Classes","citation":{"ieee":"S. Uhlemeyer, J. Lienen, E. Hüllermeier, and H. 
Gottschalk, “Detecting Novelties with Empty Classes,” <i>arXiv:2305.00983</i>. 2023.","chicago":"Uhlemeyer, Svenja, Julian Lienen, Eyke Hüllermeier, and Hanno Gottschalk. “Detecting Novelties with Empty Classes.” <i>ArXiv:2305.00983</i>, 2023.","ama":"Uhlemeyer S, Lienen J, Hüllermeier E, Gottschalk H. Detecting Novelties with Empty Classes. <i>arXiv:230500983</i>. Published online 2023.","short":"S. Uhlemeyer, J. Lienen, E. Hüllermeier, H. Gottschalk, ArXiv:2305.00983 (2023).","mla":"Uhlemeyer, Svenja, et al. “Detecting Novelties with Empty Classes.” <i>ArXiv:2305.00983</i>, 2023.","bibtex":"@article{Uhlemeyer_Lienen_Hüllermeier_Gottschalk_2023, title={Detecting Novelties with Empty Classes}, journal={arXiv:2305.00983}, author={Uhlemeyer, Svenja and Lienen, Julian and Hüllermeier, Eyke and Gottschalk, Hanno}, year={2023} }","apa":"Uhlemeyer, S., Lienen, J., Hüllermeier, E., &#38; Gottschalk, H. (2023). Detecting Novelties with Empty Classes. In <i>arXiv:2305.00983</i>."},"year":"2023"},{"conference":{"location":"Kigali, Ruanda","name":"International Conference on Learning Representations, ICLR"},"main_file_link":[{"url":"https://arxiv.org/abs/2206.05530","open_access":"1"}],"title":"Memorization-Dilation: Modeling Neural Collapse Under Noise","author":[{"first_name":"Duc Anh","full_name":"Nguyen, Duc Anh","last_name":"Nguyen"},{"first_name":"Ron","last_name":"Levie","full_name":"Levie, Ron"},{"full_name":"Lienen, Julian","id":"44040","last_name":"Lienen","first_name":"Julian"},{"first_name":"Gitta","last_name":"Kutyniok","full_name":"Kutyniok, Gitta"},{"first_name":"Eyke","id":"48129","full_name":"Hüllermeier, Eyke","last_name":"Hüllermeier"}],"date_created":"2022-06-14T14:48:36Z","date_updated":"2023-06-29T09:14:26Z","oa":"1","citation":{"ama":"Nguyen DA, Levie R, Lienen J, Kutyniok G, Hüllermeier E. Memorization-Dilation: Modeling Neural Collapse Under Noise. In: <i>International Conference on Learning Representations, ICLR</i>. ; 2023.","ieee":"D. A. Nguyen, R. 
Levie, J. Lienen, G. Kutyniok, and E. Hüllermeier, “Memorization-Dilation: Modeling Neural Collapse Under Noise,” presented at the International Conference on Learning Representations, ICLR, Kigali, Ruanda, 2023.","chicago":"Nguyen, Duc Anh, Ron Levie, Julian Lienen, Gitta Kutyniok, and Eyke Hüllermeier. “Memorization-Dilation: Modeling Neural Collapse Under Noise.” In <i>International Conference on Learning Representations, ICLR</i>, 2023.","apa":"Nguyen, D. A., Levie, R., Lienen, J., Kutyniok, G., &#38; Hüllermeier, E. (2023). Memorization-Dilation: Modeling Neural Collapse Under Noise. <i>International Conference on Learning Representations, ICLR</i>. International Conference on Learning Representations, ICLR, Kigali, Ruanda.","mla":"Nguyen, Duc Anh, et al. “Memorization-Dilation: Modeling Neural Collapse Under Noise.” <i>International Conference on Learning Representations, ICLR</i>, 2023.","bibtex":"@inproceedings{Nguyen_Levie_Lienen_Kutyniok_Hüllermeier_2023, title={Memorization-Dilation: Modeling Neural Collapse Under Noise}, booktitle={International Conference on Learning Representations, ICLR}, author={Nguyen, Duc Anh and Levie, Ron and Lienen, Julian and Kutyniok, Gitta and Hüllermeier, Eyke}, year={2023} }","short":"D.A. Nguyen, R. Levie, J. Lienen, G. Kutyniok, E. Hüllermeier, in: International Conference on Learning Representations, ICLR, 2023."},"year":"2023","language":[{"iso":"eng"}],"user_id":"44040","_id":"31880","status":"public","abstract":[{"text":"The notion of neural collapse refers to several emergent phenomena that have been empirically observed across various canonical classification problems. During the terminal phase of training a deep neural network, the feature embedding of all examples of the same class tend to collapse to a single representation, and the features of different classes tend to separate as much as possible. 
Neural collapse is often studied through a simplified model, called the unconstrained feature representation, in which the model is assumed to have \"infinite expressivity\" and can map each data point to any arbitrary representation. In this work, we propose a more realistic variant of the unconstrained feature representation that takes the limited expressivity of the network into account. Empirical evidence suggests that the memorization of noisy data points leads to a degradation (dilation) of the neural collapse. Using a model of the memorization-dilation (M-D) phenomenon, we show one mechanism by which different losses lead to different performances of the trained network on noisy data. Our proofs reveal why label smoothing, a modification of cross-entropy empirically observed to produce a regularization effect, leads to improved generalization in classification tasks.","lang":"eng"}],"publication":"International Conference on Learning Representations, ICLR","type":"conference"},{"external_id":{"arxiv":["2305.13764"]},"_id":"45911","user_id":"44040","language":[{"iso":"eng"}],"type":"preprint","publication":"arXiv:2305.13764","abstract":[{"text":"Label noise poses an important challenge in machine learning, especially in\r\ndeep learning, in which large models with high expressive power dominate the\r\nfield. Models of that kind are prone to memorizing incorrect labels, thereby\r\nharming generalization performance. Many methods have been proposed to address\r\nthis problem, including robust loss functions and more complex label correction\r\napproaches. Robust loss functions are appealing due to their simplicity, but\r\ntypically lack flexibility, while label correction usually adds substantial\r\ncomplexity to the training setup. 
In this paper, we suggest to address the\r\nshortcomings of both methodologies by \"ambiguating\" the target information,\r\nadding additional, complementary candidate labels in case the learner is not\r\nsufficiently convinced of the observed training label. More precisely, we\r\nleverage the framework of so-called superset learning to construct set-valued\r\ntargets based on a confidence threshold, which deliver imprecise yet more\r\nreliable beliefs about the ground-truth, effectively helping the learner to\r\nsuppress the memorization effect. In an extensive empirical evaluation, our\r\nmethod demonstrates favorable learning behavior on synthetic and real-world\r\nnoise, confirming the effectiveness in detecting and correcting erroneous\r\ntraining labels.","lang":"eng"}],"status":"public","date_updated":"2023-07-09T11:26:21Z","oa":"1","date_created":"2023-07-09T11:25:48Z","author":[{"first_name":"Julian","full_name":"Lienen, Julian","id":"44040","last_name":"Lienen"},{"last_name":"Hüllermeier","full_name":"Hüllermeier, Eyke","id":"48129","first_name":"Eyke"}],"title":"Mitigating Label Noise through Data Ambiguation","main_file_link":[{"open_access":"1","url":"https://arxiv.org/abs/2305.13764"}],"year":"2023","citation":{"apa":"Lienen, J., &#38; Hüllermeier, E. (2023). Mitigating Label Noise through Data Ambiguation. In <i>arXiv:2305.13764</i>.","bibtex":"@article{Lienen_Hüllermeier_2023, title={Mitigating Label Noise through Data Ambiguation}, journal={arXiv:2305.13764}, author={Lienen, Julian and Hüllermeier, Eyke}, year={2023} }","short":"J. Lienen, E. Hüllermeier, ArXiv:2305.13764 (2023).","mla":"Lienen, Julian, and Eyke Hüllermeier. “Mitigating Label Noise through Data Ambiguation.” <i>ArXiv:2305.13764</i>, 2023.","ama":"Lienen J, Hüllermeier E. Mitigating Label Noise through Data Ambiguation. <i>arXiv:230513764</i>. Published online 2023.","ieee":"J. Lienen and E. Hüllermeier, “Mitigating Label Noise through Data Ambiguation,” <i>arXiv:2305.13764</i>. 
2023.","chicago":"Lienen, Julian, and Eyke Hüllermeier. “Mitigating Label Noise through Data Ambiguation.” <i>ArXiv:2305.13764</i>, 2023."}},{"date_created":"2022-12-19T09:34:35Z","author":[{"full_name":"Campagner, Andrea","last_name":"Campagner","first_name":"Andrea"},{"first_name":"Julian","last_name":"Lienen","id":"44040","full_name":"Lienen, Julian"},{"id":"48129","full_name":"Hüllermeier, Eyke","last_name":"Hüllermeier","first_name":"Eyke"},{"first_name":"Davide","full_name":"Ciucci, Davide","last_name":"Ciucci"}],"volume":13633,"publisher":"Springer","date_updated":"2022-12-19T09:34:44Z","conference":{"location":"Suzhou, China","end_date":"2022-11-14","start_date":"2022-11-11","name":"International Joint Conference on Rough Sets"},"title":"Scikit-Weak: A Python Library for Weakly Supervised Machine Learning","citation":{"ama":"Campagner A, Lienen J, Hüllermeier E, Ciucci D. Scikit-Weak: A Python Library for Weakly Supervised Machine Learning. In: <i>Lecture Notes in Computer Science</i>. Vol 13633. Springer; 2022:57-70.","ieee":"A. Campagner, J. Lienen, E. Hüllermeier, and D. Ciucci, “Scikit-Weak: A Python Library for Weakly Supervised Machine Learning,” in <i>Lecture Notes in Computer Science</i>, Suzhou, China, 2022, vol. 13633, pp. 57–70.","chicago":"Campagner, Andrea, Julian Lienen, Eyke Hüllermeier, and Davide Ciucci. “Scikit-Weak: A Python Library for Weakly Supervised Machine Learning.” In <i>Lecture Notes in Computer Science</i>, 13633:57–70. Springer, 2022.","short":"A. Campagner, J. Lienen, E. Hüllermeier, D. Ciucci, in: Lecture Notes in Computer Science, Springer, 2022, pp. 57–70.","mla":"Campagner, Andrea, et al. “Scikit-Weak: A Python Library for Weakly Supervised Machine Learning.” <i>Lecture Notes in Computer Science</i>, vol. 13633, Springer, 2022, pp. 
57–70.","bibtex":"@inproceedings{Campagner_Lienen_Hüllermeier_Ciucci_2022, title={Scikit-Weak: A Python Library for Weakly Supervised Machine Learning}, volume={13633}, booktitle={Lecture Notes in Computer Science}, publisher={Springer}, author={Campagner, Andrea and Lienen, Julian and Hüllermeier, Eyke and Ciucci, Davide}, year={2022}, pages={57–70} }","apa":"Campagner, A., Lienen, J., Hüllermeier, E., &#38; Ciucci, D. (2022). Scikit-Weak: A Python Library for Weakly Supervised Machine Learning. <i>Lecture Notes in Computer Science</i>, <i>13633</i>, 57–70."},"intvolume":"13633","page":"57-70","year":"2022","user_id":"44040","_id":"34542","language":[{"iso":"eng"}],"type":"conference","publication":"Lecture Notes in Computer Science","status":"public"},{"abstract":[{"lang":"eng","text":"Knowledge graph embedding research has mainly focused on learning continuous representations of entities and relations tailored towards the link prediction problem. Recent results indicate an ever increasing predictive ability of current approaches on benchmark datasets. However, this effectiveness often comes with the cost of over-parameterization and increased computationally complexity. The former induces extensive hyperparameter optimization to mitigate malicious overfitting. The latter magnifies the importance of winning the hardware lottery. Here, we investigate a remedy for the first problem. We propose a technique based on Kronecker decomposition to reduce the number of parameters in a knowledge graph embedding model, while retaining its expressiveness. Through Kronecker decomposition, large embedding matrices are split into smaller embedding matrices during the training process. Hence, embeddings of knowledge graphs are not plainly retrieved but reconstructed on the fly. The decomposition ensures that elementwise interactions between three embedding vectors are extended with interactions within each embedding vector. 
This implicitly reduces redundancy in embedding vectors and encourages feature reuse. To quantify the impact of applying Kronecker decomposition on embedding matrices, we conduct a series of experiments on benchmark datasets. Our experiments suggest that applying Kronecker decomposition on embedding matrices leads to an improved parameter efficiency on all benchmark datasets. Moreover, empirical evidence suggests that reconstructed embeddings entail robustness against noise in the input knowledge graph. To foster reproducible research, we provide an open-source implementation of our approach, including training and evaluation scripts as well as pre-trained models in our knowledge graph embedding framework."}],"status":"public","publication":"arXiv:2205.06560","type":"preprint","language":[{"iso":"eng"}],"_id":"31545","user_id":"44040","year":"2022","citation":{"ama":"Demir C, Lienen J, Ngonga Ngomo A-C. Kronecker Decomposition for Knowledge Graph Embeddings. <i>arXiv:220506560</i>. Published online 2022.","chicago":"Demir, Caglar, Julian Lienen, and Axel-Cyrille Ngonga Ngomo. “Kronecker Decomposition for Knowledge Graph Embeddings.” <i>ArXiv:2205.06560</i>, 2022.","ieee":"C. Demir, J. Lienen, and A.-C. Ngonga Ngomo, “Kronecker Decomposition for Knowledge Graph Embeddings,” <i>arXiv:2205.06560</i>. 2022.","apa":"Demir, C., Lienen, J., &#38; Ngonga Ngomo, A.-C. (2022). Kronecker Decomposition for Knowledge Graph Embeddings. In <i>arXiv:2205.06560</i>.","bibtex":"@article{Demir_Lienen_Ngonga Ngomo_2022, title={Kronecker Decomposition for Knowledge Graph Embeddings}, journal={arXiv:2205.06560}, author={Demir, Caglar and Lienen, Julian and Ngonga Ngomo, Axel-Cyrille}, year={2022} }","mla":"Demir, Caglar, et al. “Kronecker Decomposition for Knowledge Graph Embeddings.” <i>ArXiv:2205.06560</i>, 2022.","short":"C. Demir, J. Lienen, A.-C. 
Ngonga Ngomo, ArXiv:2205.06560 (2022)."},"title":"Kronecker Decomposition for Knowledge Graph Embeddings","main_file_link":[{"url":"https://arxiv.org/abs/2205.06560","open_access":"1"}],"oa":"1","date_updated":"2022-05-31T07:05:50Z","date_created":"2022-05-31T07:04:36Z","author":[{"first_name":"Caglar","id":"43817","full_name":"Demir, Caglar","last_name":"Demir"},{"full_name":"Lienen, Julian","id":"44040","last_name":"Lienen","first_name":"Julian"},{"last_name":"Ngonga Ngomo","id":"65716","full_name":"Ngonga Ngomo, Axel-Cyrille","first_name":"Axel-Cyrille"}]},{"language":[{"iso":"eng"}],"_id":"31546","user_id":"44040","abstract":[{"lang":"eng","text":"In semi-supervised learning, the paradigm of self-training refers to the idea of learning from pseudo-labels suggested by the learner itself. Across various domains, corresponding methods have proven effective and achieve state-of-the-art performance. However, pseudo-labels typically stem from ad-hoc heuristics, relying on the quality of the predictions though without guaranteeing their validity. One such method, so-called credal self-supervised learning, maintains pseudo-supervision in the form of sets of (instead of single) probability distributions over labels, thereby allowing for a flexible yet uncertainty-aware labeling. Again, however, there is no justification beyond empirical effectiveness. To address this deficiency, we make use of conformal prediction, an approach that comes with guarantees on the validity of set-valued predictions. As a result, the construction of credal sets of labels is supported by a rigorous theoretical foundation, leading to better calibrated and less error-prone supervision for unlabeled data. Along with this, we present effective algorithms for learning from credal self-supervision. 
An empirical study demonstrates excellent calibration properties of the pseudo-supervision, as well as the competitiveness of our method on several benchmark datasets."}],"status":"public","publication":"arXiv:2205.15239","type":"preprint","title":"Conformal Credal Self-Supervised Learning","main_file_link":[{"url":"https://arxiv.org/abs/2205.15239","open_access":"1"}],"date_updated":"2022-05-31T07:05:54Z","oa":"1","author":[{"first_name":"Julian","last_name":"Lienen","full_name":"Lienen, Julian","id":"44040"},{"last_name":"Demir","id":"43817","full_name":"Demir, Caglar","first_name":"Caglar"},{"full_name":"Hüllermeier, Eyke","id":"48129","last_name":"Hüllermeier","first_name":"Eyke"}],"date_created":"2022-05-31T07:05:36Z","year":"2022","citation":{"chicago":"Lienen, Julian, Caglar Demir, and Eyke Hüllermeier. “Conformal Credal Self-Supervised Learning.” <i>ArXiv:2205.15239</i>, 2022.","ieee":"J. Lienen, C. Demir, and E. Hüllermeier, “Conformal Credal Self-Supervised Learning,” <i>arXiv:2205.15239</i>. 2022.","ama":"Lienen J, Demir C, Hüllermeier E. Conformal Credal Self-Supervised Learning. <i>arXiv:220515239</i>. Published online 2022.","apa":"Lienen, J., Demir, C., &#38; Hüllermeier, E. (2022). Conformal Credal Self-Supervised Learning. In <i>arXiv:2205.15239</i>.","short":"J. Lienen, C. Demir, E. Hüllermeier, ArXiv:2205.15239 (2022).","bibtex":"@article{Lienen_Demir_Hüllermeier_2022, title={Conformal Credal Self-Supervised Learning}, journal={arXiv:2205.15239}, author={Lienen, Julian and Demir, Caglar and Hüllermeier, Eyke}, year={2022} }","mla":"Lienen, Julian, et al. “Conformal Credal Self-Supervised Learning.” <i>ArXiv:2205.15239</i>, 2022."}},{"status":"public","citation":{"ieee":"J. Lienen and E. Hüllermeier, “Credal Self-Supervised Learning,” presented at the Annual Conference on Neural Information Processing Systems, NeurIPS, Online, 2021.","chicago":"Lienen, Julian, and Eyke Hüllermeier. 
“Credal Self-Supervised Learning.” In <i>Advances in Neural Information Processing Systems 34: Annual Conference on Neural Information Processing Systems, NeurIPS</i>, 2021.","ama":"Lienen J, Hüllermeier E. Credal Self-Supervised Learning. In: <i>Advances in Neural Information Processing Systems 34: Annual Conference on Neural Information Processing Systems, NeurIPS</i>. ; 2021.","apa":"Lienen, J., &#38; Hüllermeier, E. (2021). Credal Self-Supervised Learning. <i>Advances in Neural Information Processing Systems 34: Annual Conference on Neural Information Processing Systems, NeurIPS</i>. Annual Conference on Neural Information Processing Systems, NeurIPS, Online.","short":"J. Lienen, E. Hüllermeier, in: Advances in Neural Information Processing Systems 34: Annual Conference on Neural Information Processing Systems, NeurIPS, 2021.","bibtex":"@inproceedings{Lienen_Hüllermeier_2021, title={Credal Self-Supervised Learning}, booktitle={Advances in Neural Information Processing Systems 34: Annual Conference on Neural Information Processing Systems, NeurIPS}, author={Lienen, Julian and Hüllermeier, Eyke}, year={2021} }","mla":"Lienen, Julian, and Eyke Hüllermeier. 
“Credal Self-Supervised Learning.” <i>Advances in Neural Information Processing Systems 34: Annual Conference on Neural Information Processing Systems, NeurIPS</i>, 2021."},"year":"2021","publication":"Advances in Neural Information Processing Systems 34: Annual Conference on Neural Information Processing Systems, NeurIPS","type":"conference","language":[{"iso":"eng"}],"conference":{"start_date":"2021-12-06","name":"Annual Conference on Neural Information Processing Systems, NeurIPS","location":"Online","end_date":"2021-12-14"},"title":"Credal Self-Supervised Learning","user_id":"44040","date_created":"2021-11-05T10:27:51Z","author":[{"id":"44040","full_name":"Lienen, Julian","last_name":"Lienen","first_name":"Julian"},{"first_name":"Eyke","last_name":"Hüllermeier","full_name":"Hüllermeier, Eyke"}],"_id":"27161","date_updated":"2022-01-06T06:57:35Z"},{"author":[{"last_name":"Lienen","full_name":"Lienen, Julian","id":"44040","first_name":"Julian"},{"first_name":"Nils","last_name":"Nommensen","full_name":"Nommensen, Nils"},{"first_name":"Ralph","last_name":"Ewerth","full_name":"Ewerth, Ralph"},{"first_name":"Eyke","last_name":"Hüllermeier","full_name":"Hüllermeier, Eyke"}],"date_created":"2021-11-05T10:29:38Z","date_updated":"2022-01-06T06:57:35Z","conference":{"name":"13th Asian Conference on Machine Learning, ACML","start_date":"2021-11-17","end_date":"2021-11-19","location":"Online"},"title":"Robust Regression for Monocular Depth Estimation","citation":{"bibtex":"@inproceedings{Lienen_Nommensen_Ewerth_Hüllermeier_2021, title={Robust Regression for Monocular Depth Estimation}, booktitle={13th Asian Conference on Machine Learning, ACML}, author={Lienen, Julian and Nommensen, Nils and Ewerth, Ralph and Hüllermeier, Eyke}, year={2021} }","mla":"Lienen, Julian, et al. “Robust Regression for Monocular Depth Estimation.” <i>13th Asian Conference on Machine Learning, ACML</i>, 2021.","short":"J. Lienen, N. Nommensen, R. Ewerth, E. 
Hüllermeier, in: 13th Asian Conference on Machine Learning, ACML, 2021.","apa":"Lienen, J., Nommensen, N., Ewerth, R., &#38; Hüllermeier, E. (2021). Robust Regression for Monocular Depth Estimation. <i>13th Asian Conference on Machine Learning, ACML</i>. 13th Asian Conference on Machine Learning, ACML, Online.","ama":"Lienen J, Nommensen N, Ewerth R, Hüllermeier E. Robust Regression for Monocular Depth Estimation. In: <i>13th Asian Conference on Machine Learning, ACML</i>. ; 2021.","chicago":"Lienen, Julian, Nils Nommensen, Ralph Ewerth, and Eyke Hüllermeier. “Robust Regression for Monocular Depth Estimation.” In <i>13th Asian Conference on Machine Learning, ACML</i>, 2021.","ieee":"J. Lienen, N. Nommensen, R. Ewerth, and E. Hüllermeier, “Robust Regression for Monocular Depth Estimation,” presented at the 13th Asian Conference on Machine Learning, ACML, Online, 2021."},"year":"2021","user_id":"44040","_id":"27162","language":[{"iso":"eng"}],"type":"conference","publication":"13th Asian Conference on Machine Learning, ACML","status":"public"},{"main_file_link":[{"url":"https://www.sciencedirect.com/science/article/pii/S0888613X21000463"}],"title":"Instance weighting through data imprecisiation","date_created":"2021-04-20T06:48:18Z","author":[{"last_name":"Lienen","id":"44040","full_name":"Lienen, Julian","first_name":"Julian"},{"last_name":"Hüllermeier","full_name":"Hüllermeier, Eyke","id":"48129","first_name":"Eyke"}],"date_updated":"2022-01-06T06:55:08Z","publisher":"Elsevier","citation":{"short":"J. Lienen, E. Hüllermeier, International Journal of Approximate Reasoning (2021).","bibtex":"@article{Lienen_Hüllermeier_2021, title={Instance weighting through data imprecisiation}, journal={International Journal of Approximate Reasoning}, publisher={Elsevier}, author={Lienen, Julian and Hüllermeier, Eyke}, year={2021} }","mla":"Lienen, Julian, and Eyke Hüllermeier. 
“Instance Weighting through Data Imprecisiation.” <i>International Journal of Approximate Reasoning</i>, Elsevier, 2021.","apa":"Lienen, J., &#38; Hüllermeier, E. (2021). Instance weighting through data imprecisiation. <i>International Journal of Approximate Reasoning</i>.","ama":"Lienen J, Hüllermeier E. Instance weighting through data imprecisiation. <i>International Journal of Approximate Reasoning</i>. 2021.","chicago":"Lienen, Julian, and Eyke Hüllermeier. “Instance Weighting through Data Imprecisiation.” <i>International Journal of Approximate Reasoning</i>, 2021.","ieee":"J. Lienen and E. Hüllermeier, “Instance weighting through data imprecisiation,” <i>International Journal of Approximate Reasoning</i>, 2021."},"year":"2021","language":[{"iso":"eng"}],"user_id":"44040","_id":"21636","status":"public","publication":"International Journal of Approximate Reasoning","type":"journal_article"},{"intvolume":"35","page":"8583-8591","citation":{"apa":"Lienen, J., &#38; Hüllermeier, E. (2021). From Label Smoothing to Label Relaxation. In <i>Proceedings of the 35th AAAI Conference on Artificial Intelligence, AAAI</i> (Vol. 35, pp. 8583–8591). Online: AAAI Press.","bibtex":"@inproceedings{Lienen_Hüllermeier_2021, title={From Label Smoothing to Label Relaxation}, volume={35}, number={10}, booktitle={Proceedings of the 35th AAAI Conference on Artificial Intelligence, AAAI}, publisher={AAAI Press}, author={Lienen, Julian and Hüllermeier, Eyke}, year={2021}, pages={8583–8591} }","mla":"Lienen, Julian, and Eyke Hüllermeier. “From Label Smoothing to Label Relaxation.” <i>Proceedings of the 35th AAAI Conference on Artificial Intelligence, AAAI</i>, vol. 35, no. 10, AAAI Press, 2021, pp. 8583–91.","short":"J. Lienen, E. Hüllermeier, in: Proceedings of the 35th AAAI Conference on Artificial Intelligence, AAAI, AAAI Press, 2021, pp. 8583–8591.","chicago":"Lienen, Julian, and Eyke Hüllermeier. 
“From Label Smoothing to Label Relaxation.” In <i>Proceedings of the 35th AAAI Conference on Artificial Intelligence, AAAI</i>, 35:8583–91. AAAI Press, 2021.","ieee":"J. Lienen and E. Hüllermeier, “From Label Smoothing to Label Relaxation,” in <i>Proceedings of the 35th AAAI Conference on Artificial Intelligence, AAAI</i>, Online, 2021, vol. 35, no. 10, pp. 8583–8591.","ama":"Lienen J, Hüllermeier E. From Label Smoothing to Label Relaxation. In: <i>Proceedings of the 35th AAAI Conference on Artificial Intelligence, AAAI</i>. Vol 35. AAAI Press; 2021:8583-8591."},"year":"2021","issue":"10","conference":{"name":"35th AAAI Conference on Artificial Intelligence, AAAI","start_date":"2021-02-02","end_date":"2021-02-09","location":"Online"},"main_file_link":[{"open_access":"1","url":"https://ojs.aaai.org/index.php/AAAI/article/view/17041"}],"title":"From Label Smoothing to Label Relaxation","volume":35,"date_created":"2021-04-20T06:50:43Z","author":[{"first_name":"Julian","last_name":"Lienen","full_name":"Lienen, Julian","id":"44040"},{"first_name":"Eyke","full_name":"Hüllermeier, Eyke","id":"48129","last_name":"Hüllermeier"}],"publisher":"AAAI Press","date_updated":"2022-01-06T06:55:08Z","oa":"1","status":"public","publication":"Proceedings of the 35th AAAI Conference on Artificial Intelligence, AAAI","type":"conference","language":[{"iso":"eng"}],"user_id":"44040","_id":"21637"},{"citation":{"apa":"Lienen, J., Hüllermeier, E., Ewerth, R., &#38; Nommensen, N. (2021). Monocular Depth Estimation via Listwise Ranking using the Plackett-Luce Model. <i>Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, CVPR</i>, 14595–14604.","short":"J. Lienen, E. Hüllermeier, R. Ewerth, N. Nommensen, in: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, CVPR, 2021, pp. 14595–14604.","mla":"Lienen, Julian, et al. 
“Monocular Depth Estimation via Listwise Ranking Using the Plackett-Luce Model.” <i>Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, CVPR</i>, 2021, pp. 14595–604.","bibtex":"@inproceedings{Lienen_Hüllermeier_Ewerth_Nommensen_2021, title={Monocular Depth Estimation via Listwise Ranking using the Plackett-Luce Model}, booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, CVPR}, author={Lienen, Julian and Hüllermeier, Eyke and Ewerth, Ralph and Nommensen, Nils}, year={2021}, pages={14595–14604} }","ama":"Lienen J, Hüllermeier E, Ewerth R, Nommensen N. Monocular Depth Estimation via Listwise Ranking using the Plackett-Luce Model. In: <i>Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, CVPR</i>. ; 2021:14595-14604.","chicago":"Lienen, Julian, Eyke Hüllermeier, Ralph Ewerth, and Nils Nommensen. “Monocular Depth Estimation via Listwise Ranking Using the Plackett-Luce Model.” In <i>Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, CVPR</i>, 14595–604, 2021.","ieee":"J. Lienen, E. Hüllermeier, R. Ewerth, and N. Nommensen, “Monocular Depth Estimation via Listwise Ranking using the Plackett-Luce Model,” in <i>Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, CVPR</i>, Online, 2021, pp. 
14595–14604."},"page":"14595-14604","year":"2021","conference":{"location":"Online","end_date":"2021-06-25","start_date":"2021-06-19","name":"IEEE/CVF Conference on Computer Vision and Pattern Recognition, CVPR"},"title":"Monocular Depth Estimation via Listwise Ranking using the Plackett-Luce Model","author":[{"first_name":"Julian","id":"44040","full_name":"Lienen, Julian","last_name":"Lienen"},{"first_name":"Eyke","last_name":"Hüllermeier","full_name":"Hüllermeier, Eyke","id":"48129"},{"first_name":"Ralph","full_name":"Ewerth, Ralph","last_name":"Ewerth"},{"first_name":"Nils","full_name":"Nommensen, Nils","last_name":"Nommensen"}],"date_created":"2021-06-02T10:35:40Z","date_updated":"2022-01-06T06:55:29Z","status":"public","type":"conference","publication":"Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, CVPR","language":[{"iso":"eng"}],"user_id":"44040","_id":"22280"},{"year":"2021","citation":{"chicago":"Lienen, Julian, and Eyke Hüllermeier. “Credal Self-Supervised Learning.” <i>ArXiv:2106.11853</i>, 2021.","ieee":"J. Lienen and E. Hüllermeier, “Credal Self-Supervised Learning,” <i>arXiv:2106.11853</i>. 2021.","ama":"Lienen J, Hüllermeier E. Credal Self-Supervised Learning. <i>arXiv:210611853</i>. 2021.","short":"J. Lienen, E. Hüllermeier, ArXiv:2106.11853 (2021).","bibtex":"@article{Lienen_Hüllermeier_2021, title={Credal Self-Supervised Learning}, journal={arXiv:2106.11853}, author={Lienen, Julian and Hüllermeier, Eyke}, year={2021} }","mla":"Lienen, Julian, and Eyke Hüllermeier. “Credal Self-Supervised Learning.” <i>ArXiv:2106.11853</i>, 2021.","apa":"Lienen, J., &#38; Hüllermeier, E. (2021). Credal Self-Supervised Learning. 
<i>ArXiv:2106.11853</i>."},"date_updated":"2022-01-06T06:55:35Z","oa":"1","author":[{"id":"44040","full_name":"Lienen, Julian","last_name":"Lienen","first_name":"Julian"},{"first_name":"Eyke","id":"48129","full_name":"Hüllermeier, Eyke","last_name":"Hüllermeier"}],"date_created":"2021-06-23T07:24:38Z","title":"Credal Self-Supervised Learning","main_file_link":[{"url":"https://arxiv.org/pdf/2106.11853.pdf","open_access":"1"}],"publication":"arXiv:2106.11853","type":"preprint","abstract":[{"lang":"eng","text":"Self-training is an effective approach to semi-supervised learning. The key idea is to let the learner itself iteratively generate \"pseudo-supervision\" for unlabeled instances based on its current hypothesis. In combination with consistency regularization, pseudo-labeling has shown promising performance in various domains, for example in computer vision. To account for the hypothetical nature of the pseudo-labels, these are commonly provided in the form of probability distributions. Still, one may argue that even a probability distribution represents an excessive level of informedness, as it suggests that the learner precisely knows the ground-truth conditional probabilities. In our approach, we therefore allow the learner to label instances in the form of credal sets, that is, sets of (candidate) probability distributions. Thanks to this increased expressiveness, the learner is able to represent uncertainty and a lack of knowledge in a more flexible and more faithful manner. To learn from weakly labeled data of that kind, we leverage methods that have recently been proposed in the realm of so-called superset learning. 
In an exhaustive empirical evaluation, we compare our methodology to state-of-the-art self-supervision approaches, showing competitive to superior performance especially in low-label scenarios incorporating a high degree of uncertainty."}],"status":"public","_id":"22509","user_id":"44040","language":[{"iso":"eng"}]},{"publication":"arXiv:2010.13118","type":"preprint","abstract":[{"text":"In many real-world applications, the relative depth of objects in an image is\r\ncrucial for scene understanding, e.g., to calculate occlusions in augmented\r\nreality scenes. Predicting depth in monocular images has recently been tackled\r\nusing machine learning methods, mainly by treating the problem as a regression\r\ntask. Yet, being interested in an order relation in the first place,\r\nranking methods suggest themselves as a natural alternative to regression, and\r\nindeed, ranking approaches leveraging pairwise comparisons as training\r\ninformation (\"object A is closer to the camera than B\") have shown promising\r\nperformance on this problem. In this paper, we elaborate on the use of\r\nso-called \\emph{listwise} ranking as a generalization of the pairwise approach.\r\nListwise ranking goes beyond pairwise comparisons between objects and considers\r\nrankings of arbitrary length as training information. Our approach is based on\r\nthe Plackett-Luce model, a probability distribution on rankings, which we\r\ncombine with a state-of-the-art neural network architecture and a sampling\r\nstrategy to reduce training complexity. An empirical evaluation on benchmark\r\ndata in a \"zero-shot\" setting demonstrates the effectiveness of our proposal\r\ncompared to existing ranking and regression methods.","lang":"eng"}],"status":"public","_id":"20211","user_id":"44040","language":[{"iso":"eng"}],"year":"2020","citation":{"mla":"Lienen, Julian, and Eyke Hüllermeier. 
“Monocular Depth Estimation via Listwise Ranking Using the Plackett-Luce  Model.” <i>ArXiv:2010.13118</i>, 2020.","bibtex":"@article{Lienen_Hüllermeier_2020, title={Monocular Depth Estimation via Listwise Ranking using the Plackett-Luce  model}, journal={arXiv:2010.13118}, author={Lienen, Julian and Hüllermeier, Eyke}, year={2020} }","short":"J. Lienen, E. Hüllermeier, ArXiv:2010.13118 (2020).","apa":"Lienen, J., &#38; Hüllermeier, E. (2020). Monocular Depth Estimation via Listwise Ranking using the Plackett-Luce  model. <i>ArXiv:2010.13118</i>.","ama":"Lienen J, Hüllermeier E. Monocular Depth Estimation via Listwise Ranking using the Plackett-Luce  model. <i>arXiv:201013118</i>. 2020.","chicago":"Lienen, Julian, and Eyke Hüllermeier. “Monocular Depth Estimation via Listwise Ranking Using the Plackett-Luce  Model.” <i>ArXiv:2010.13118</i>, 2020.","ieee":"J. Lienen and E. Hüllermeier, “Monocular Depth Estimation via Listwise Ranking using the Plackett-Luce  model,” <i>arXiv:2010.13118</i>. 2020."},"oa":"1","date_updated":"2022-01-06T06:54:23Z","author":[{"full_name":"Lienen, Julian","id":"44040","last_name":"Lienen","first_name":"Julian"},{"last_name":"Hüllermeier","id":"48129","full_name":"Hüllermeier, Eyke","first_name":"Eyke"}],"date_created":"2020-10-27T07:48:40Z","title":"Monocular Depth Estimation via Listwise Ranking using the Plackett-Luce  model","main_file_link":[{"url":"https://arxiv.org/abs/2010.13118","open_access":"1"}]},{"language":[{"iso":"eng"}],"title":"Automated Feature Engineering on Time Series Data","author":[{"first_name":"Julian","last_name":"Lienen","full_name":"Lienen, Julian","id":"44040"}],"date_created":"2020-04-06T11:44:17Z","user_id":"44040","_id":"16415","date_updated":"2022-01-06T06:52:50Z","status":"public","citation":{"chicago":"Lienen, Julian. <i>Automated Feature Engineering on Time Series Data</i>, 2019.","ieee":"J. Lienen, <i>Automated Feature Engineering on Time Series Data</i>. 2019.","ama":"Lienen J. 
<i>Automated Feature Engineering on Time Series Data</i>.; 2019.","apa":"Lienen, J. (2019). <i>Automated Feature Engineering on Time Series Data</i>.","bibtex":"@book{Lienen_2019, title={Automated Feature Engineering on Time Series Data}, author={Lienen, Julian}, year={2019} }","short":"J. Lienen, Automated Feature Engineering on Time Series Data, 2019.","mla":"Lienen, Julian. <i>Automated Feature Engineering on Time Series Data</i>. 2019."},"year":"2019","type":"mastersthesis"}]
