@inproceedings{20306,
  author       = {Tornede, Alexander and Wever, Marcel Dominik and Hüllermeier, Eyke},
  booktitle    = {Workshop MetaLearn 2020 @ NeurIPS 2020},
  location     = {Online},
  title        = {Towards Meta-Algorithm Selection},
  year         = {2020},
}

@inbook{18014,
  author       = {El Mesaoudi-Paul, Adil and Weiß, Dimitri and Bengs, Viktor and Hüllermeier, Eyke and Tierney, Kevin},
  booktitle    = {Learning and Intelligent Optimization. LION 2020.},
  isbn         = {9783030535513},
  issn         = {0302-9743},
  pages        = {216--232},
  publisher    = {Springer},
  title        = {Pool-Based Realtime Algorithm Configuration: A Preselection Bandit Approach},
  doi          = {10.1007/978-3-030-53552-0_22},
  volume       = {12096},
  year         = {2020},
}

@unpublished{18017,
  abstract     = {We consider an extension of the contextual multi-armed bandit problem, in
which, instead of selecting a single alternative (arm), a learner is supposed
to make a preselection in the form of a subset of alternatives. More
specifically, in each iteration, the learner is presented a set of arms and a
context, both described in terms of feature vectors. The task of the learner is
to preselect $k$ of these arms, among which a final choice is made in a second
step. In our setup, we assume that each arm has a latent (context-dependent)
utility, and that feedback on a preselection is produced according to a
Plackett-Luce model. We propose the CPPL algorithm, which is inspired by the
well-known UCB algorithm, and evaluate this algorithm on synthetic and real
data. In particular, we consider an online algorithm selection scenario, which
served as a main motivation of our problem setting. Here, an instance (which
defines the context) from a certain problem class (such as SAT) can be solved
by different algorithms (the arms), but only $k$ of these algorithms can
actually be run.},
  author       = {El Mesaoudi-Paul, Adil and Bengs, Viktor and Hüllermeier, Eyke},
  eprint       = {2002.04275},
  eprinttype   = {arXiv},
  note         = {arXiv:2002.04275},
  title        = {Online Preselection with Context Information under the {Plackett-Luce} Model},
  year         = {2020},
}

@inproceedings{18276,
  abstract     = {Algorithm selection (AS) deals with the automatic selection of an algorithm
from a fixed set of candidate algorithms most suitable for a specific instance
of an algorithmic problem class, where "suitability" often refers to an
algorithm's runtime. Due to possibly extremely long runtimes of candidate
algorithms, training data for algorithm selection models is usually generated
under time constraints in the sense that not all algorithms are run to
completion on all instances. Thus, training data usually comprises censored
information, as the true runtime of algorithms timed out remains unknown.
However, many standard AS approaches are not able to handle such information in
a proper way. On the other side, survival analysis (SA) naturally supports
censored data and offers appropriate ways to use such data for learning
distributional models of algorithm runtime, as we demonstrate in this work. We
leverage such models as a basis of a sophisticated decision-theoretic approach
to algorithm selection, which we dub Run2Survive. Moreover, taking advantage of
a framework of this kind, we advocate a risk-averse approach to algorithm
selection, in which the avoidance of a timeout is given high priority. In an
extensive experimental study with the standard benchmark ASlib, our approach is
shown to be highly competitive and in many cases even superior to
state-of-the-art AS approaches.},
  author       = {Tornede, Alexander and Wever, Marcel Dominik and Werner, Stefan and Mohr, Felix and Hüllermeier, Eyke},
  booktitle    = {ACML 2020},
  location     = {Bangkok, Thailand},
  title        = {{Run2Survive}: A Decision-theoretic Approach to Algorithm Selection based on Survival Analysis},
  year         = {2020},
}

@article{16725,
  author       = {Richter, Cedric and Hüllermeier, Eyke and Jakobs, Marie-Christine and Wehrheim, Heike},
  journal      = {Journal of Automated Software Engineering},
  publisher    = {Springer},
  title        = {Algorithm Selection for Software Validation Based on Graph Kernels},
  year         = {2020},
}

@inproceedings{15629,
  abstract     = {In multi-label classification (MLC), each instance is associated with a set of class labels, in contrast to standard classification where an instance is assigned a single label. Binary relevance (BR) learning, which reduces a multi-label to a set of binary classification problems, one per label, is arguably the most straight-forward approach to MLC. In spite of its simplicity, BR proved to be competitive to more sophisticated MLC methods, and still achieves state-of-the-art performance for many loss functions. Somewhat surprisingly, the optimal choice of the base learner for tackling the binary classification problems has received very little attention so far. Taking advantage of the label independence assumption inherent to BR, we propose a label-wise base learner selection method optimizing label-wise macro averaged performance measures. In an extensive experimental evaluation, we find that our approach, called LiBRe, can significantly improve generalization performance.},
  author       = {Wever, Marcel Dominik and Tornede, Alexander and Mohr, Felix and Hüllermeier, Eyke},
  location     = {Konstanz, Germany},
  publisher    = {Springer},
  title        = {{LiBRe}: Label-Wise Selection of Base Learners in Binary Relevance for Multi-Label Classification},
  internal-note = {booktitle missing (required for @inproceedings); Konstanz/Springer/2020 suggests IDA 2020 -- verify},
  year         = {2020},
}

@inproceedings{8868,
  author       = {Wever, Marcel Dominik and Mohr, Felix and Hüllermeier, Eyke and Hetzer, Alexander},
  location     = {Bayreuth, Germany},
  title        = {Towards Automated Machine Learning for Multi-Label Classification},
  internal-note = {booktitle missing (required for @inproceedings) -- verify venue},
  year         = {2019},
}

@article{10578,
  author       = {Tagne, V. K. and Fotso, S. and Fono, L. A. and Hüllermeier, Eyke},
  journal      = {New Mathematics and Natural Computation},
  number       = {2},
  pages        = {191--213},
  title        = {Choice Functions Generated by {Mallows} and {Plackett–Luce} Relations},
  volume       = {15},
  year         = {2019},
}

@article{15001,
  author       = {Couso, Ines and Borgelt, Christian and Hüllermeier, Eyke and Kruse, Rudolf},
  issn         = {1556-603X},
  journal      = {IEEE Computational Intelligence Magazine},
  pages        = {31--44},
  title        = {Fuzzy Sets in Data Analysis: From Statistical Foundations to Machine Learning},
  doi          = {10.1109/mci.2018.2881642},
  year         = {2019},
}

@article{15002,
  abstract     = {Many problem settings in machine learning are concerned with the simultaneous prediction of multiple target variables of diverse type. Amongst others, such problem settings arise in multivariate regression, multi-label classification, multi-task learning, dyadic prediction, zero-shot learning, network inference, and matrix completion. These subfields of machine learning are typically studied in isolation, without highlighting or exploring important relationships. In this paper, we present a unifying view on what we call multi-target prediction (MTP) problems and methods. First, we formally discuss commonalities and differences between existing MTP problems. To this end, we introduce a general framework that covers the above subfields as special cases. As a second contribution, we provide a structured overview of MTP methods. This is accomplished by identifying a number of key properties, which distinguish such methods and determine their suitability for different types of problems. Finally, we also discuss a few challenges for future research.},
  author       = {Waegeman, Willem and Dembczynski, Krzysztof and Hüllermeier, Eyke},
  issn         = {1573-756X},
  journal      = {Data Mining and Knowledge Discovery},
  number       = {2},
  pages        = {293--324},
  title        = {Multi-target prediction: a unifying view on problems and methods},
  doi          = {10.1007/s10618-018-0595-5},
  volume       = {33},
  year         = {2019},
}

@inproceedings{15003,
  author       = {Mortier, Thomas and Wydmuch, Marek and Dembczynski, Krzysztof and Hüllermeier, Eyke and Waegeman, Willem},
  booktitle    = {Proceedings of the 31st Benelux Conference on Artificial Intelligence ({BNAIC} 2019) and the 28th Belgian Dutch Conference on Machine Learning (Benelearn 2019), Brussels, Belgium, November 6-8, 2019},
  title        = {Set-Valued Prediction in Multi-Class Classification},
  year         = {2019},
}

@inbook{15004,
  author       = {Ahmadi Fahandar, Mohsen and Hüllermeier, Eyke},
  booktitle    = {Discovery Science},
  isbn         = {9783030337773},
  issn         = {0302-9743},
  title        = {Feature Selection for Analogy-Based Learning to Rank},
  doi          = {10.1007/978-3-030-33778-0_22},
  year         = {2019},
}

@inbook{15005,
  author       = {Ahmadi Fahandar, Mohsen and Hüllermeier, Eyke},
  booktitle    = {KI 2019: Advances in Artificial Intelligence},
  isbn         = {9783030301781},
  issn         = {0302-9743},
  title        = {Analogy-Based Preference Learning with Kernels},
  doi          = {10.1007/978-3-030-30179-8_3},
  year         = {2019},
}

@inbook{15006,
  author       = {Nguyen, Vu-Linh and Destercke, Sébastien and Hüllermeier, Eyke},
  booktitle    = {Discovery Science},
  isbn         = {9783030337773},
  issn         = {0302-9743},
  title        = {Epistemic Uncertainty Sampling},
  doi          = {10.1007/978-3-030-33778-0_7},
  year         = {2019},
}

@inproceedings{15007,
  author       = {Melnikov, Vitaly and Hüllermeier, Eyke},
  booktitle    = {Proceedings ACML, Asian Conference on Machine Learning (Proceedings of Machine Learning Research, 101)},
  title        = {Learning to Aggregate: Tackling the Aggregation/Disaggregation Problem for {OWA}},
  internal-note = {removed doi 10.1016/j.jmva.2019.02.017: an Elsevier/JMVA identifier that cannot belong to this ACML/PMLR paper -- verify correct DOI},
  year         = {2019},
}

@inproceedings{15011,
  author       = {Tornede, Alexander and Wever, Marcel Dominik and Hüllermeier, Eyke},
  booktitle    = {Proceedings - 29. Workshop Computational Intelligence, Dortmund, 28. - 29. November 2019},
  editor       = {Hoffmann, Frank and Hüllermeier, Eyke and Mikut, Ralf},
  isbn         = {978-3-7315-0979-0},
  location     = {Dortmund},
  pages        = {135--146},
  publisher    = {KIT Scientific Publishing, Karlsruhe},
  title        = {Algorithm Selection as Recommendation: From Collaborative Filtering to Dyad Ranking},
  year         = {2019},
}

@inproceedings{15013,
  author       = {Brinker, Klaus and Hüllermeier, Eyke},
  booktitle    = {Proceedings ECML/PKDD, European Conference on Machine Learning and Knowledge Discovery in Databases},
  title        = {A Reduction of Label Ranking to Multiclass Classification},
  year         = {2019},
}

@inproceedings{15014,
  author       = {Hüllermeier, Eyke and Couso, Ines and Destercke, Sébastien},
  booktitle    = {Proceedings SUM 2019, International Conference on Scalable Uncertainty Management},
  title        = {Learning from Imprecise Data: Adjustments of Optimistic and Pessimistic Variants},
  year         = {2019},
}

@article{15015,
  author       = {{Henzgen, Sascha and Hüllermeier, Eyke}},
  issn         = {{1556-4681}},
  journal      = {{ACM Transactions on Knowledge Discovery from Data}},
  pages        = {{1--36}},
  title        = {{{Mining Rank Data}}},
  doi          = {{10.1145/3363572}},
  year         = {{2019}},
}

@inproceedings{13132,
  author       = {Mohr, Felix and Wever, Marcel Dominik and Tornede, Alexander and Hüllermeier, Eyke},
  booktitle    = {INFORMATIK 2019: 50 Jahre Gesellschaft für Informatik – Informatik für Gesellschaft},
  location     = {Kassel},
  pages        = {273--274},
  publisher    = {Gesellschaft für Informatik e.V.},
  title        = {From Automated to On-The-Fly Machine Learning},
  year         = {2019},
}

