@inproceedings{24382,
author = {{Gevers, Karina and Schöppner, Volker and Hüllermeier, Eyke}},
location = {{online}},
title = {{{Heated tool butt welding of two different materials – Established methods versus artificial intelligence}}},
year = {{2021}},
}
@article{21004,
abstract = {{Automated machine learning (AutoML) supports the algorithmic construction and data-specific customization of machine learning pipelines, including the selection, combination, and parametrization of machine learning algorithms as main constituents. Generally speaking, AutoML approaches comprise two major components: a search space model and an optimizer for traversing the space. Recent approaches have shown impressive results in the realm of supervised learning, most notably (single-label) classification (SLC). Moreover, first attempts at extending these approaches towards multi-label classification (MLC) have been made. While the space of candidate pipelines is already huge in SLC, the complexity of the search space is raised to an even higher power in MLC. One may wonder, therefore, whether and to what extent optimizers established for SLC can scale to this increased complexity, and how they compare to each other. This paper makes the following contributions: First, we survey existing approaches to AutoML for MLC. Second, we augment these approaches with optimizers not previously tried for MLC. Third, we propose a benchmarking framework that supports a fair and systematic comparison. Fourth, we conduct an extensive experimental study, evaluating the methods on a suite of MLC problems. We find a grammar-based best-first search to compare favorably to other optimizers.}},
author = {{Wever, Marcel Dominik and Tornede, Alexander and Mohr, Felix and Hüllermeier, Eyke}},
issn = {{0162-8828}},
journal = {{IEEE Transactions on Pattern Analysis and Machine Intelligence}},
keywords = {{Automated Machine Learning, Multi Label Classification, Hierarchical Planning, Bayesian Optimization}},
pages = {{1--1}},
title = {{{AutoML for Multi-Label Classification: Overview and Empirical Evaluation}}},
doi = {{10.1109/tpami.2021.3051276}},
year = {{2021}},
}
@article{21092,
abstract = {{Automated Machine Learning (AutoML) seeks to automatically find so-called machine learning pipelines that maximize the prediction performance when being used to train a model on a given dataset. One of the main and yet open challenges in AutoML is an effective use of computational resources: An AutoML process involves the evaluation of many candidate pipelines, which are costly but often ineffective because they are canceled due to a timeout.
In this paper, we present an approach to predict the runtime of two-step machine learning pipelines with up to one pre-processor, which can be used to anticipate whether or not a pipeline will time out. Separate runtime models are trained offline for each algorithm that may be used in a pipeline, and an overall prediction is derived from these models. We empirically show that the approach increases successful evaluations made by an AutoML tool while preserving or even improving on the previously best solutions.}},
author = {{Mohr, Felix and Wever, Marcel Dominik and Tornede, Alexander and Hüllermeier, Eyke}},
journal = {{IEEE Transactions on Pattern Analysis and Machine Intelligence}},
publisher = {{IEEE}},
title = {{{Predicting Machine Learning Pipeline Runtimes in the Context of Automated Machine Learning}}},
year = {{2021}},
}
@inproceedings{21198,
author = {{Hanselle, Jonas Manuel and Tornede, Alexander and Wever, Marcel Dominik and Hüllermeier, Eyke}},
location = {{Delhi, India}},
title = {{{Algorithm Selection as Superset Learning: Constructing Algorithm Selectors from Imprecise Performance Data}}},
year = {{2021}},
}
@article{21535,
author = {{Bengs, Viktor and Busa-Fekete, Róbert and El Mesaoudi-Paul, Adil and Hüllermeier, Eyke}},
journal = {{Journal of Machine Learning Research}},
number = {{7}},
pages = {{1--108}},
title = {{{Preference-based Online Learning with Dueling Bandits: A Survey}}},
volume = {{22}},
year = {{2021}},
}
@inproceedings{21570,
author = {{Tornede, Tanja and Tornede, Alexander and Wever, Marcel Dominik and Hüllermeier, Eyke}},
booktitle = {{Proceedings of the Genetic and Evolutionary Computation Conference}},
title = {{{Coevolution of Remaining Useful Lifetime Estimation Pipelines for Automated Predictive Maintenance}}},
year = {{2021}},
}
@inproceedings{23779,
abstract = {{Produktentstehung (PE) bezieht sich auf den Prozess der Planung und Entwicklung eines Produkts sowie der damit verbundenen Dienstleistungen von der ersten Idee bis zur Herstellung und zum Vertrieb. Während dieses Prozesses gibt es zahlreiche Aufgaben, die von menschlichem Fachwissen abhängen und typischerweise von erfahrenen Experten übernommen werden. Da sich das Feld der Künstlichen Intelligenz (KI) immer weiterentwickelt und seinen Weg in den Fertigungssektor findet, gibt es viele Möglichkeiten für eine Anwendung von KI, um bei der Lösung der oben genannten Aufgaben zu helfen. In diesem Paper geben wir einen umfassenden Überblick über den aktuellen Stand der Technik des Einsatzes von KI in der PE.
Im Detail analysieren wir 40 bestehende Surveys zu KI in der PE und 94 Case Studies, um herauszufinden, welche Bereiche der PE von der aktuellen Forschung in diesem Bereich vorrangig adressiert werden, wie ausgereift die diskutierten KI-Methoden sind und inwieweit datenzentrierte Ansätze in der aktuellen Forschung genutzt werden.}},
author = {{Bernijazov, Ruslan and Dicks, Alexander and Dumitrescu, Roman and Foullois, Marc and Hanselle, Jonas Manuel and Hüllermeier, Eyke and Karakaya, Gökce and Ködding, Patrick and Lohweg, Volker and Malatyali, Manuel and Meyer auf der Heide, Friedhelm and Panzner, Melina and Soltenborn, Christian}},
booktitle = {{Proceedings of the 30th International Joint Conference on Artificial Intelligence (IJCAI-21)}},
keywords = {{Artificial Intelligence Product Creation Literature Review}},
location = {{Montreal, Canada}},
title = {{{A Meta-Review on Artificial Intelligence in Product Creation}}},
year = {{2021}},
}
@inproceedings{22913,
author = {{Hüllermeier, Eyke and Mohr, Felix and Tornede, Alexander and Wever, Marcel Dominik}},
location = {{Bilbao (Virtual)}},
title = {{{Automated Machine Learning, Bounded Rationality, and Rational Metareasoning}}},
year = {{2021}},
}
@inproceedings{22914,
author = {{Mohr, Felix and Wever, Marcel Dominik}},
location = {{Virtual}},
title = {{{Replacing the Ex-Def Baseline in AutoML by Naive AutoML}}},
year = {{2021}},
}
@unpublished{21600,
abstract = {{Many problems in science and engineering require an efficient numerical approximation of integrals
or solutions to differential equations. For systems with rapidly changing dynamics, an equidistant
discretization is often inadvisable as it either results in prohibitively large errors or computational effort. To this end, adaptive schemes, such as solvers based on Runge–Kutta pairs, have been developed
which adapt the step size based on local error estimations at each step. While the classical schemes
apply very generally and are highly efficient on regular systems, they can behave sub-optimal when
an inefficient step rejection mechanism is triggered by structurally complex systems such as chaotic
systems. To overcome these issues, we propose a method to tailor numerical schemes to the problem
class at hand. This is achieved by combining simple, classical quadrature rules or ODE solvers with
data-driven time-stepping controllers. Compared with learning solution operators to ODEs directly,
it generalises better to unseen initial data as our approach employs classical numerical schemes as
base methods. At the same time it can make use of identified structures of a problem class and,
therefore, outperforms state-of-the-art adaptive schemes. Several examples demonstrate superior
efficiency. Source code is available at https://github.com/lueckem/quadrature-ML.}},
author = {{Dellnitz, Michael and Hüllermeier, Eyke and Lücke, Marvin and Ober-Blöbaum, Sina and Offen, Christian and Peitz, Sebastian and Pfannschmidt, Karlson}},
booktitle = {{arXiv:2104.03562}},
title = {{{Efficient time stepping for numerical integration using reinforcement learning}}},
year = {{2021}},
}
@inproceedings{27381,
abstract = {{Graph neural networks (GNNs) have been successfully applied in many structured data domains, with applications ranging from molecular property prediction to the analysis of social networks. Motivated by the broad applicability of GNNs, we propose the family of so-called RankGNNs, a combination of neural Learning to Rank (LtR) methods and GNNs. RankGNNs are trained with a set of pair-wise preferences between graphs, suggesting that one of them is preferred over the other. One practical application of this problem is drug screening, where an expert wants to find the most promising molecules in a large collection of drug candidates. We empirically demonstrate that our proposed pair-wise RankGNN approach either significantly outperforms or at least matches the ranking performance of the naive point-wise baseline approach, in which the LtR problem is solved via GNN-based graph regression.}},
author = {{Damke, Clemens and Hüllermeier, Eyke}},
booktitle = {{Proceedings of The 24th International Conference on Discovery Science (DS 2021)}},
editor = {{Soares, Carlos and Torgo, Luis}},
isbn = {{9783030889418}},
issn = {{0302-9743}},
keywords = {{Graph-structured data, Graph neural networks, Preference learning, Learning to rank}},
location = {{Halifax, Canada}},
pages = {{166--180}},
publisher = {{Springer}},
title = {{{Ranking Structured Objects with Graph Neural Networks}}},
doi = {{10.1007/978-3-030-88942-5}},
volume = {{12986}},
year = {{2021}},
}
@phdthesis{27284,
author = {{Wever, Marcel Dominik}},
title = {{{Automated Machine Learning for Multi-Label Classification}}},
doi = {{10.17619/UNIPB/1-1302}},
year = {{2021}},
}
@inbook{19521,
author = {{Pfannschmidt, Karlson and Hüllermeier, Eyke}},
booktitle = {{Lecture Notes in Computer Science}},
isbn = {{9783030582845}},
issn = {{0302-9743}},
title = {{{Learning Choice Functions via Pareto-Embeddings}}},
doi = {{10.1007/978-3-030-58285-2_30}},
year = {{2020}},
}
@inproceedings{19953,
abstract = {{Current GNN architectures use a vertex neighborhood aggregation scheme, which limits their discriminative power to that of the 1-dimensional Weisfeiler-Lehman (WL) graph isomorphism test. Here, we propose a novel graph convolution operator that is based on the 2-dimensional WL test. We formally show that the resulting 2-WL-GNN architecture is more discriminative than existing GNN approaches. This theoretical result is complemented by experimental studies using synthetic and real data. On multiple common graph classification benchmarks, we demonstrate that the proposed model is competitive with state-of-the-art graph kernels and GNNs.}},
author = {{Damke, Clemens and Melnikov, Vitaly and Hüllermeier, Eyke}},
booktitle = {{Proceedings of the 12th Asian Conference on Machine Learning (ACML 2020)}},
editor = {{Pan, Sinno Jialin and Sugiyama, Masashi}},
keywords = {{graph neural networks, Weisfeiler-Lehman test, cycle detection}},
location = {{Bangkok, Thailand}},
pages = {{49--64}},
publisher = {{PMLR}},
title = {{{A Novel Higher-order Weisfeiler-Lehman Graph Convolution}}},
volume = {{129}},
year = {{2020}},
}
@inproceedings{21534,
author = {{Bengs, Viktor and Hüllermeier, Eyke}},
booktitle = {{International Conference on Machine Learning}},
pages = {{778--787}},
title = {{{Preselection Bandits}}},
year = {{2020}},
}
@unpublished{21536,
abstract = {{We consider a resource-aware variant of the classical multi-armed bandit
problem: In each round, the learner selects an arm and determines a resource
limit. It then observes a corresponding (random) reward, provided the (random)
amount of consumed resources remains below the limit. Otherwise, the
observation is censored, i.e., no reward is obtained. For this problem setting,
we introduce a measure of regret, which incorporates the actual amount of
allocated resources of each learning round as well as the optimality of
realizable rewards. Thus, to minimize regret, the learner needs to set a
resource limit and choose an arm in such a way that the chance to realize a
high reward within the predefined resource limit is high, while the resource
limit itself should be kept as low as possible. We derive the theoretical lower
bound on the cumulative regret and propose a learning algorithm having a regret
upper bound that matches the lower bound. In a simulation study, we show that
our learning algorithm outperforms straightforward extensions of standard
multi-armed bandit algorithms.}},
author = {{Bengs, Viktor and Hüllermeier, Eyke}},
booktitle = {{arXiv:2011.00813}},
title = {{{Multi-Armed Bandits with Censored Consumption of Resources}}},
year = {{2020}},
}
@inproceedings{17407,
author = {{Tornede, Alexander and Wever, Marcel Dominik and Hüllermeier, Eyke}},
booktitle = {{Discovery Science}},
title = {{{Extreme Algorithm Selection with Dyadic Feature Representation}}},
year = {{2020}},
}
@inproceedings{17408,
author = {{Hanselle, Jonas Manuel and Tornede, Alexander and Wever, Marcel Dominik and Hüllermeier, Eyke}},
booktitle = {{KI 2020: Advances in Artificial Intelligence}},
title = {{{Hybrid Ranking and Regression for Algorithm Selection}}},
year = {{2020}},
}
@inproceedings{17424,
author = {{Tornede, Tanja and Tornede, Alexander and Wever, Marcel Dominik and Mohr, Felix and Hüllermeier, Eyke}},
booktitle = {{Proceedings of the ECMLPKDD 2020}},
title = {{{AutoML for Predictive Maintenance: One Tool to RUL Them All}}},
doi = {{10.1007/978-3-030-66770-2_8}},
year = {{2020}},
}
@unpublished{17605,
abstract = {{Syntactic annotation of corpora in the form of part-of-speech (POS) tags is a key requirement for both linguistic research and subsequent automated natural language processing (NLP) tasks. This problem is commonly tackled using machine learning methods, i.e., by training a POS tagger on a sufficiently large corpus of labeled data.
While the problem of POS tagging can essentially be considered as solved for modern languages, historical corpora turn out to be much more difficult, especially due to the lack of native speakers and sparsity of training data. Moreover, most texts have no sentences as we know them today, nor a common orthography.
These irregularities render the task of automated POS tagging more difficult and error-prone. Under these circumstances, instead of forcing the POS tagger to predict and commit to a single tag, it should be enabled to express its uncertainty. In this paper, we consider POS tagging within the framework of set-valued prediction, which allows the POS tagger to express its uncertainty via predicting a set of candidate POS tags instead of guessing a single one. The goal is to guarantee a high confidence that the correct POS tag is included while keeping the number of candidates small.
In our experimental study, we find that extending state-of-the-art POS taggers to set-valued prediction yields more precise and robust taggings, especially for unknown words, i.e., words not occurring in the training data.}},
author = {{Heid, Stefan Helmut and Wever, Marcel Dominik and Hüllermeier, Eyke}},
booktitle = {{Journal of Data Mining and Digital Humanities}},
publisher = {{episciences}},
title = {{{Reliable Part-of-Speech Tagging of Historical Corpora through Set-Valued Prediction}}},
year = {{2020}},
}
@inproceedings{20306,
author = {{Tornede, Alexander and Wever, Marcel Dominik and Hüllermeier, Eyke}},
booktitle = {{Workshop MetaLearn 2020 @ NeurIPS 2020}},
location = {{Online}},
title = {{{Towards Meta-Algorithm Selection}}},
year = {{2020}},
}
@inbook{18014,
author = {{El Mesaoudi-Paul, Adil and Weiß, Dimitri and Bengs, Viktor and Hüllermeier, Eyke and Tierney, Kevin}},
booktitle = {{Learning and Intelligent Optimization. LION 2020.}},
isbn = {{9783030535513}},
issn = {{0302-9743}},
pages = {{216--232}},
publisher = {{Springer}},
title = {{{Pool-Based Realtime Algorithm Configuration: A Preselection Bandit Approach}}},
doi = {{10.1007/978-3-030-53552-0_22}},
volume = {{12096}},
year = {{2020}},
}
@unpublished{18017,
abstract = {{We consider an extension of the contextual multi-armed bandit problem, in
which, instead of selecting a single alternative (arm), a learner is supposed
to make a preselection in the form of a subset of alternatives. More
specifically, in each iteration, the learner is presented a set of arms and a
context, both described in terms of feature vectors. The task of the learner is
to preselect $k$ of these arms, among which a final choice is made in a second
step. In our setup, we assume that each arm has a latent (context-dependent)
utility, and that feedback on a preselection is produced according to a
Plackett-Luce model. We propose the CPPL algorithm, which is inspired by the
well-known UCB algorithm, and evaluate this algorithm on synthetic and real
data. In particular, we consider an online algorithm selection scenario, which
served as a main motivation of our problem setting. Here, an instance (which
defines the context) from a certain problem class (such as SAT) can be solved
by different algorithms (the arms), but only $k$ of these algorithms can
actually be run.}},
author = {{El Mesaoudi-Paul, Adil and Bengs, Viktor and Hüllermeier, Eyke}},
booktitle = {{arXiv:2002.04275}},
title = {{{Online Preselection with Context Information under the Plackett-Luce Model}}},
year = {{2020}},
}
@inproceedings{18276,
abstract = {{Algorithm selection (AS) deals with the automatic selection of an algorithm
from a fixed set of candidate algorithms most suitable for a specific instance
of an algorithmic problem class, where "suitability" often refers to an
algorithm's runtime. Due to possibly extremely long runtimes of candidate
algorithms, training data for algorithm selection models is usually generated
under time constraints in the sense that not all algorithms are run to
completion on all instances. Thus, training data usually comprises censored
information, as the true runtime of algorithms timed out remains unknown.
However, many standard AS approaches are not able to handle such information in
a proper way. On the other side, survival analysis (SA) naturally supports
censored data and offers appropriate ways to use such data for learning
distributional models of algorithm runtime, as we demonstrate in this work. We
leverage such models as a basis of a sophisticated decision-theoretic approach
to algorithm selection, which we dub Run2Survive. Moreover, taking advantage of
a framework of this kind, we advocate a risk-averse approach to algorithm
selection, in which the avoidance of a timeout is given high priority. In an
extensive experimental study with the standard benchmark ASlib, our approach is
shown to be highly competitive and in many cases even superior to
state-of-the-art AS approaches.}},
author = {{Tornede, Alexander and Wever, Marcel Dominik and Werner, Stefan and Mohr, Felix and Hüllermeier, Eyke}},
booktitle = {{ACML 2020}},
location = {{Bangkok, Thailand}},
title = {{{Run2Survive: A Decision-theoretic Approach to Algorithm Selection based on Survival Analysis}}},
year = {{2020}},
}
@article{16725,
author = {{Richter, Cedric and Hüllermeier, Eyke and Jakobs, Marie-Christine and Wehrheim, Heike}},
journal = {{Journal of Automated Software Engineering}},
publisher = {{Springer}},
title = {{{Algorithm Selection for Software Validation Based on Graph Kernels}}},
year = {{2020}},
}
@inproceedings{15629,
abstract = {{In multi-label classification (MLC), each instance is associated with a set of class labels, in contrast to standard classification where an instance is assigned a single label. Binary relevance (BR) learning, which reduces a multi-label to a set of binary classification problems, one per label, is arguably the most straight-forward approach to MLC. In spite of its simplicity, BR proved to be competitive to more sophisticated MLC methods, and still achieves state-of-the-art performance for many loss functions. Somewhat surprisingly, the optimal choice of the base learner for tackling the binary classification problems has received very little attention so far. Taking advantage of the label independence assumption inherent to BR, we propose a label-wise base learner selection method optimizing label-wise macro averaged performance measures. In an extensive experimental evaluation, we find that or approach, called LiBRe, can significantly improve generalization performance.}},
author = {{Wever, Marcel Dominik and Tornede, Alexander and Mohr, Felix and Hüllermeier, Eyke}},
location = {{Konstanz, Germany}},
publisher = {{Springer}},
title = {{{LiBRe: Label-Wise Selection of Base Learners in Binary Relevance for Multi-Label Classification}}},
year = {{2020}},
}
@article{15025,
abstract = {{In software engineering, the imprecise requirements of a user are transformed to a formal requirements specification during the requirements elicitation process. This process is usually guided by requirements engineers interviewing the user. We want to partially automate this first step of the software engineering process in order to enable users to specify a desired software system on their own. With our approach, users are only asked to provide exemplary behavioral descriptions. The problem of synthesizing a requirements specification from examples can partially be reduced to the problem of grammatical inference, to which we apply an active coevolutionary learning approach. However, this approach would usually require many feedback queries to be sent to the user. In this work, we extend and generalize our active learning approach to receive knowledge from multiple oracles, also known as proactive learning. The ‘user oracle’ represents input received from the user and the ‘knowledge oracle’ represents available, formalized domain knowledge. We call our two-oracle approach the ‘first apply knowledge then query’ (FAKT/Q) algorithm. We compare FAKT/Q to the active learning approach and provide an extensive benchmark evaluation. As result we find that the number of required user queries is reduced and the inference process is sped up significantly. Finally, with so-called On-The-Fly Markets, we present a motivation and an application of our approach where such knowledge is available.}},
author = {{Wever, Marcel Dominik and van Rooijen, Lorijn and Hamann, Heiko}},
journal = {{Evolutionary Computation}},
number = {{2}},
pages = {{165--193}},
publisher = {{MIT Press Journals}},
title = {{{Multi-Oracle Coevolutionary Learning of Requirements Specifications from Examples in On-The-Fly Markets}}},
doi = {{10.1162/evco_a_00266}},
volume = {{28}},
year = {{2020}},
}
@unpublished{19523,
abstract = {{We study the problem of learning choice functions, which play an important
role in various domains of application, most notably in the field of economics.
Formally, a choice function is a mapping from sets to sets: Given a set of
choice alternatives as input, a choice function identifies a subset of most
preferred elements. Learning choice functions from suitable training data comes
with a number of challenges. For example, the sets provided as input and the
subsets produced as output can be of any size. Moreover, since the order in
which alternatives are presented is irrelevant, a choice function should be
symmetric. Perhaps most importantly, choice functions are naturally
context-dependent, in the sense that the preference in favor of an alternative
may depend on what other options are available. We formalize the problem of
learning choice functions and present two general approaches based on two
representations of context-dependent utility functions. Both approaches are
instantiated by means of appropriate neural network architectures, and their
performance is demonstrated on suitable benchmark tasks.}},
author = {{Pfannschmidt, Karlson and Gupta, Pritha and Hüllermeier, Eyke}},
booktitle = {{arXiv:1901.10860}},
title = {{{Learning Choice Functions: Concepts and Architectures}}},
year = {{2019}},
}
@article{17565,
author = {{Merten, Marie-Luis and Seemann, Nina and Wever, Marcel Dominik}},
journal = {{Niederdeutsches Jahrbuch}},
number = {{142}},
pages = {{124--146}},
title = {{{Grammatikwandel digital-kulturwissenschaftlich erforscht. Mittelniederdeutscher Sprachausbau im interdisziplinären Zugriff}}},
year = {{2019}},
}
@unpublished{18018,
abstract = {{A common statistical task lies in showing asymptotic normality of certain
statistics. In many of these situations, classical textbook results on weak
convergence theory suffice for the problem at hand. However, there are quite
some scenarios where stronger results are needed in order to establish an
asymptotic normal approximation uniformly over a family of probability
measures. In this note we collect some results in this direction. We restrict
ourselves to weak convergence in $\mathbb R^d$ with continuous limit measures.}},
author = {{Bengs, Viktor and Holzmann, Hajo}},
booktitle = {{arXiv:1903.09864}},
title = {{{Uniform approximation in classical weak convergence theory}}},
year = {{2019}},
}
@inproceedings{8868,
author = {{Wever, Marcel Dominik and Mohr, Felix and Hüllermeier, Eyke and Hetzer, Alexander}},
location = {{Bayreuth, Germany}},
title = {{{Towards Automated Machine Learning for Multi-Label Classification}}},
year = {{2019}},
}
@article{10578,
author = {{Tagne, V. K. and Fotso, S. and Fono, L. A. and Hüllermeier, Eyke}},
journal = {{New Mathematics and Natural Computation}},
number = {{2}},
pages = {{191--213}},
title = {{{Choice Functions Generated by Mallows and Plackett–Luce Relations}}},
volume = {{15}},
year = {{2019}},
}
@article{15001,
author = {{Couso, Ines and Borgelt, Christian and Hüllermeier, Eyke and Kruse, Rudolf}},
issn = {{1556-603X}},
journal = {{IEEE Computational Intelligence Magazine}},
pages = {{31--44}},
title = {{{Fuzzy Sets in Data Analysis: From Statistical Foundations to Machine Learning}}},
doi = {{10.1109/mci.2018.2881642}},
year = {{2019}},
}
@article{15002,
abstract = {{Many problem settings in machine learning are concerned with the simultaneous prediction of multiple target variables of diverse type. Amongst others, such problem settings arise in multivariate regression, multi-label classification, multi-task learning, dyadic prediction, zero-shot learning, network inference, and matrix completion. These subfields of machine learning are typically studied in isolation, without highlighting or exploring important relationships. In this paper, we present a unifying view on what we call multi-target prediction (MTP) problems and methods. First, we formally discuss commonalities and differences between existing MTP problems. To this end, we introduce a general framework that covers the above subfields as special cases. As a second contribution, we provide a structured overview of MTP methods. This is accomplished by identifying a number of key properties, which distinguish such methods and determine their suitability for different types of problems. Finally, we also discuss a few challenges for future research.}},
author = {{Waegeman, Willem and Dembczynski, Krzysztof and Hüllermeier, Eyke}},
issn = {{1573-756X}},
journal = {{Data Mining and Knowledge Discovery}},
number = {{2}},
pages = {{293--324}},
title = {{{Multi-target prediction: a unifying view on problems and methods}}},
doi = {{10.1007/s10618-018-0595-5}},
volume = {{33}},
year = {{2019}},
}
@inproceedings{15003,
author = {{Mortier, Thomas and Wydmuch, Marek and Dembczynski, Krzysztof and Hüllermeier, Eyke and Waegeman, Willem}},
booktitle = {{Proceedings of the 31st Benelux Conference on Artificial Intelligence {(BNAIC} 2019) and the 28th Belgian Dutch Conference on Machine Learning (Benelearn 2019), Brussels, Belgium, November 6-8, 2019}},
title = {{{Set-Valued Prediction in Multi-Class Classification}}},
year = {{2019}},
}
@inbook{15004,
author = {{Ahmadi Fahandar, Mohsen and Hüllermeier, Eyke}},
booktitle = {{Discovery Science}},
isbn = {{9783030337773}},
issn = {{0302-9743}},
title = {{{Feature Selection for Analogy-Based Learning to Rank}}},
doi = {{10.1007/978-3-030-33778-0_22}},
year = {{2019}},
}
@inbook{15005,
author = {{Ahmadi Fahandar, Mohsen and Hüllermeier, Eyke}},
booktitle = {{KI 2019: Advances in Artificial Intelligence}},
isbn = {{9783030301781}},
issn = {{0302-9743}},
title = {{{Analogy-Based Preference Learning with Kernels}}},
doi = {{10.1007/978-3-030-30179-8_3}},
year = {{2019}},
}
@inbook{15006,
author = {{Nguyen, Vu-Linh and Destercke, Sébastien and Hüllermeier, Eyke}},
booktitle = {{Discovery Science}},
isbn = {{9783030337773}},
issn = {{0302-9743}},
title = {{{Epistemic Uncertainty Sampling}}},
doi = {{10.1007/978-3-030-33778-0_7}},
year = {{2019}},
}
@inproceedings{15007,
author = {{Melnikov, Vitaly and Hüllermeier, Eyke}},
booktitle = {{Proceedings ACML, Asian Conference on Machine Learning (Proceedings of Machine Learning Research, 101)}},
title = {{{Learning to Aggregate: Tackling the Aggregation/Disaggregation Problem for OWA}}},
internal-note = {{removed doi 10.1016/j.jmva.2019.02.017: it belongs to the JMVA article (entry 14027), not this ACML paper -- verify correct DOI}},
year = {{2019}},
}
@inproceedings{15009,
author = {{Epple, Nico and Dari, Simone and Drees, Ludwig and Protschky, Valentin and Riener, Andreas}},
booktitle = {{2019 IEEE Intelligent Vehicles Symposium (IV)}},
isbn = {{9781728105604}},
title = {{{Influence of Cruise Control on Driver Guidance - a Comparison between System Generations and Countries}}},
doi = {{10.1109/ivs.2019.8814100}},
year = {{2019}},
}
@inproceedings{15011,
author = {{Tornede, Alexander and Wever, Marcel Dominik and Hüllermeier, Eyke}},
booktitle = {{Proceedings - 29. Workshop Computational Intelligence, Dortmund, 28. - 29. November 2019}},
editor = {{Hoffmann, Frank and Hüllermeier, Eyke and Mikut, Ralf}},
isbn = {{978-3-7315-0979-0}},
location = {{Dortmund}},
pages = {{135--146}},
publisher = {{KIT Scientific Publishing, Karlsruhe}},
title = {{{Algorithm Selection as Recommendation: From Collaborative Filtering to Dyad Ranking}}},
year = {{2019}},
}
@inproceedings{15013,
author = {{Brinker, Klaus and Hüllermeier, Eyke}},
booktitle = {{Proceedings ECML/PKDD, European Conference on Machine Learning and Knowledge Discovery in Databases}},
title = {{{A Reduction of Label Ranking to Multiclass Classification}}},
year = {{2019}},
}
@inproceedings{15014,
author = {{Hüllermeier, Eyke and Couso, Ines and Destercke, Sébastien}},
booktitle = {{Proceedings SUM 2019, International Conference on Scalable Uncertainty Management}},
title = {{{Learning from Imprecise Data: Adjustments of Optimistic and Pessimistic Variants}}},
year = {{2019}},
}
@article{15015,
author = {{Henzgen, Sascha and Hüllermeier, Eyke}},
issn = {{1556-4681}},
journal = {{ACM Transactions on Knowledge Discovery from Data}},
pages = {{1--36}},
title = {{{Mining Rank Data}}},
doi = {{10.1145/3363572}},
year = {{2019}},
}
@article{14027,
author = {{Bengs, Viktor and Eulert, Matthias and Holzmann, Hajo}},
issn = {{0047-259X}},
journal = {{Journal of Multivariate Analysis}},
pages = {{291--312}},
title = {{{Asymptotic confidence sets for the jump curve in bivariate regression problems}}},
doi = {{10.1016/j.jmva.2019.02.017}},
year = {{2019}},
}
@article{14028,
author = {{Bengs, Viktor and Holzmann, Hajo}},
issn = {{1935-7524}},
journal = {{Electronic Journal of Statistics}},
pages = {{1523--1579}},
title = {{{Adaptive confidence sets for kink estimation}}},
doi = {{10.1214/19-ejs1555}},
year = {{2019}},
}
@inproceedings{13132,
author = {{Mohr, Felix and Wever, Marcel Dominik and Tornede, Alexander and Hüllermeier, Eyke}},
booktitle = {{INFORMATIK 2019: 50 Jahre Gesellschaft für Informatik – Informatik für Gesellschaft}},
location = {{Kassel}},
pages = {{273--274}},
publisher = {{Gesellschaft für Informatik e.V.}},
title = {{{From Automated to On-The-Fly Machine Learning}}},
year = {{2019}},
}
@inproceedings{10232,
abstract = {{Existing tools for automated machine learning, such as Auto-WEKA, TPOT, auto-sklearn, and more recently ML-Plan, have shown impressive results for the tasks of single-label classification and regression. Yet, there is only little work on other types of machine learning problems so far. In particular, there is almost no work on automating the engineering of machine learning solutions for multi-label classification (MLC). We show how the scope of ML-Plan, an AutoML-tool for multi-class classification, can be extended towards MLC using MEKA, which is a multi-label extension of the well-known Java library WEKA. The resulting approach recursively refines MEKA's multi-label classifiers, nesting other multi-label classifiers for meta algorithms and single-label classifiers provided by WEKA as base learners. In our evaluation, we find that the proposed approach yields strong results and performs significantly better than a set of baselines we compare with.}},
author = {{Wever, Marcel Dominik and Mohr, Felix and Tornede, Alexander and Hüllermeier, Eyke}},
location = {{Long Beach, CA, USA}},
title = {{{Automating Multi-Label Classification Extending ML-Plan}}},
year = {{2019}},
}
@inproceedings{2479,
author = {{Mohr, Felix and Wever, Marcel Dominik and Hüllermeier, Eyke and Faez, Amin}},
booktitle = {{SCC}},
location = {{San Francisco, CA, USA}},
publisher = {{IEEE}},
title = {{{(WIP) Towards the Automated Composition of Machine Learning Services}}},
doi = {{10.1109/SCC.2018.00039}},
year = {{2018}},
}
@unpublished{19524,
abstract = {{Object ranking is an important problem in the realm of preference learning.
On the basis of training data in the form of a set of rankings of objects,
which are typically represented as feature vectors, the goal is to learn a
ranking function that predicts a linear order of any new set of objects.
Current approaches commonly focus on ranking by scoring, i.e., on learning an
underlying latent utility function that seeks to capture the inherent utility
of each object. These approaches, however, are not able to take possible
effects of context-dependence into account, where context-dependence means that
the utility or usefulness of an object may also depend on what other objects
are available as alternatives. In this paper, we formalize the problem of
context-dependent ranking and present two general approaches based on two
natural representations of context-dependent ranking functions. Both approaches
are instantiated by means of appropriate neural network architectures, which
are evaluated on suitable benchmark task.}},
author = {{Pfannschmidt, Karlson and Gupta, Pritha and Hüllermeier, Eyke}},
booktitle = {{arXiv:1803.05796}},
title = {{{Deep Architectures for Learning Context-dependent Ranking Functions}}},
year = {{2018}},
}