@inproceedings{17424,
  author       = {Tornede, Tanja and Tornede, Alexander and Wever, Marcel Dominik and Mohr, Felix and Hüllermeier, Eyke},
  booktitle    = {Proceedings of the ECMLPKDD 2020},
  title        = {{AutoML for Predictive Maintenance: One Tool to RUL Them All}},
  doi          = {10.1007/978-3-030-66770-2_8},
  year         = {2020},
}

@inproceedings{20306,
  author       = {Tornede, Alexander and Wever, Marcel Dominik and Hüllermeier, Eyke},
  booktitle    = {Workshop MetaLearn 2020 @ NeurIPS 2020},
  location     = {Online},
  title        = {{Towards Meta-Algorithm Selection}},
  year         = {2020},
}

@inproceedings{18276,
  abstract     = {Algorithm selection (AS) deals with the automatic selection of an algorithm
from a fixed set of candidate algorithms most suitable for a specific instance
of an algorithmic problem class, where "suitability" often refers to an
algorithm's runtime. Due to possibly extremely long runtimes of candidate
algorithms, training data for algorithm selection models is usually generated
under time constraints in the sense that not all algorithms are run to
completion on all instances. Thus, training data usually comprises censored
information, as the true runtime of algorithms timed out remains unknown.
However, many standard AS approaches are not able to handle such information in
a proper way. On the other side, survival analysis (SA) naturally supports
censored data and offers appropriate ways to use such data for learning
distributional models of algorithm runtime, as we demonstrate in this work. We
leverage such models as a basis of a sophisticated decision-theoretic approach
to algorithm selection, which we dub Run2Survive. Moreover, taking advantage of
a framework of this kind, we advocate a risk-averse approach to algorithm
selection, in which the avoidance of a timeout is given high priority. In an
extensive experimental study with the standard benchmark ASlib, our approach is
shown to be highly competitive and in many cases even superior to
state-of-the-art AS approaches.},
  author       = {Tornede, Alexander and Wever, Marcel Dominik and Werner, Stefan and Mohr, Felix and Hüllermeier, Eyke},
  booktitle    = {ACML 2020},
  location     = {Bangkok, Thailand},
  title        = {{Run2Survive: A Decision-theoretic Approach to Algorithm Selection based on Survival Analysis}},
  year         = {2020},
}

@inproceedings{15629,
  abstract     = {In multi-label classification (MLC), each instance is associated with a set of class labels, in contrast to standard classification where an instance is assigned a single label. Binary relevance (BR) learning, which reduces a multi-label to a set of binary classification problems, one per label, is arguably the most straight-forward approach to MLC. In spite of its simplicity, BR proved to be competitive to more sophisticated MLC methods, and still achieves state-of-the-art performance for many loss functions. Somewhat surprisingly, the optimal choice of the base learner for tackling the binary classification problems has received very little attention so far. Taking advantage of the label independence assumption inherent to BR, we propose a label-wise base learner selection method optimizing label-wise macro averaged performance measures. In an extensive experimental evaluation, we find that our approach, called LiBRe, can significantly improve generalization performance.},
  author       = {Wever, Marcel Dominik and Tornede, Alexander and Mohr, Felix and Hüllermeier, Eyke},
  location     = {Konstanz, Germany},
  publisher    = {Springer},
  title        = {{LiBRe: Label-Wise Selection of Base Learners in Binary Relevance for Multi-Label Classification}},
  year         = {2020},
  internal-note = {missing required booktitle for @inproceedings — verify proceedings title},
}

@article{15025,
  abstract     = {In software engineering, the imprecise requirements of a user are transformed to a formal requirements specification during the requirements elicitation process. This process is usually guided by requirements engineers interviewing the user. We want to partially automate this first step of the software engineering process in order to enable users to specify a desired software system on their own. With our approach, users are only asked to provide exemplary behavioral descriptions. The problem of synthesizing a requirements specification from examples can partially be reduced to the problem of grammatical inference, to which we apply an active coevolutionary learning approach. However, this approach would usually require many feedback queries to be sent to the user. In this work, we extend and generalize our active learning approach to receive knowledge from multiple oracles, also known as proactive learning. The ‘user oracle’ represents input received from the user and the ‘knowledge oracle’ represents available, formalized domain knowledge. We call our two-oracle approach the ‘first apply knowledge then query’ (FAKT/Q) algorithm. We compare FAKT/Q to the active learning approach and provide an extensive benchmark evaluation. As result we find that the number of required user queries is reduced and the inference process is sped up significantly. Finally, with so-called On-The-Fly Markets, we present a motivation and an application of our approach where such knowledge is available.},
  author       = {Wever, Marcel Dominik and van Rooijen, Lorijn and Hamann, Heiko},
  journal      = {Evolutionary Computation},
  number       = {2},
  pages        = {165--193},
  publisher    = {MIT Press Journals},
  title        = {{Multi-Oracle Coevolutionary Learning of Requirements Specifications from Examples in On-The-Fly Markets}},
  doi          = {10.1162/evco_a_00266},
  volume       = {28},
  year         = {2020},
}

@phdthesis{16935,
  author       = {Moussalem, Diego Campos},
  school       = {Paderborn University},
  title        = {{Knowledge Graphs for Multilingual Language Translation and Generation}},
  doi          = {10.17619/UNIPB/1-980},
  year         = {2020},
  internal-note = {school inferred from the UNIPB DOI prefix — verify},
}

@article{13770,
  author       = {Karl, Holger and Kundisch, Dennis and Meyer auf der Heide, Friedhelm and Wehrheim, Heike},
  journal      = {Business \& Information Systems Engineering},
  number       = {6},
  pages        = {467--481},
  publisher    = {Springer},
  title        = {{A Case for a New IT Ecosystem: On-The-Fly Computing}},
  doi          = {10.1007/s12599-019-00627-x},
  volume       = {62},
  year         = {2020},
}

@inproceedings{8868,
  author       = {Wever, Marcel Dominik and Mohr, Felix and Hüllermeier, Eyke and Hetzer, Alexander},
  location     = {Bayreuth, Germany},
  title        = {{Towards Automated Machine Learning for Multi-Label Classification}},
  year         = {2019},
  internal-note = {missing required booktitle for @inproceedings — verify proceedings title},
}

@inproceedings{15007,
  author       = {Melnikov, Vitaly and Hüllermeier, Eyke},
  booktitle    = {Proceedings ACML, Asian Conference on Machine Learning (Proceedings of Machine Learning Research, 101)},
  title        = {{Learning to Aggregate: Tackling the Aggregation/Disaggregation Problem for OWA}},
  doi          = {10.1016/j.jmva.2019.02.017},
  year         = {2019},
  internal-note = {doi prefix 10.1016/j.jmva belongs to Journal of Multivariate Analysis, not PMLR/ACML — verify this DOI matches the paper},
}

@inproceedings{15011,
  author       = {Tornede, Alexander and Wever, Marcel Dominik and Hüllermeier, Eyke},
  booktitle    = {Proceedings - 29. Workshop Computational Intelligence, Dortmund, 28. - 29. November 2019},
  editor       = {Hoffmann, Frank and Hüllermeier, Eyke and Mikut, Ralf},
  isbn         = {978-3-7315-0979-0},
  location     = {Dortmund},
  pages        = {135--146},
  publisher    = {KIT Scientific Publishing, Karlsruhe},
  title        = {{Algorithm Selection as Recommendation: From Collaborative Filtering to Dyad Ranking}},
  year         = {2019},
}

@inproceedings{13132,
  author       = {Mohr, Felix and Wever, Marcel Dominik and Tornede, Alexander and Hüllermeier, Eyke},
  booktitle    = {INFORMATIK 2019: 50 Jahre Gesellschaft für Informatik – Informatik für Gesellschaft},
  location     = {Kassel},
  pages        = {273--274},
  publisher    = {Gesellschaft für Informatik e.V.},
  title        = {{From Automated to On-The-Fly Machine Learning}},
  year         = {2019},
}

@inproceedings{10232,
  abstract     = {Existing tools for automated machine learning, such as Auto-WEKA, TPOT, auto-sklearn, and more recently ML-Plan, have shown impressive results for the tasks of single-label classification and regression. Yet, there is only little work on other types of machine learning problems so far. In particular, there is almost no work on automating the engineering of machine learning solutions for multi-label classification (MLC). We show how the scope of ML-Plan, an AutoML-tool for multi-class classification, can be extended towards MLC using MEKA, which is a multi-label extension of the well-known Java library WEKA. The resulting approach recursively refines MEKA's multi-label classifiers, nesting other multi-label classifiers for meta algorithms and single-label classifiers provided by WEKA as base learners. In our evaluation, we find that the proposed approach yields strong results and performs significantly better than a set of baselines we compare with.},
  author       = {Wever, Marcel Dominik and Mohr, Felix and Tornede, Alexander and Hüllermeier, Eyke},
  location     = {Long Beach, CA, USA},
  title        = {{Automating Multi-Label Classification Extending ML-Plan}},
  year         = {2019},
  internal-note = {missing required booktitle for @inproceedings — verify proceedings title},
}

@inproceedings{2479,
  author       = {Mohr, Felix and Wever, Marcel Dominik and Hüllermeier, Eyke and Faez, Amin},
  booktitle    = {SCC},
  location     = {San Francisco, CA, USA},
  publisher    = {IEEE},
  title        = {{(WIP) Towards the Automated Composition of Machine Learning Services}},
  doi          = {10.1109/SCC.2018.00039},
  year         = {2018},
}

@inproceedings{2857,
  author       = {Mohr, Felix and Lettmann, Theodor and Hüllermeier, Eyke and Wever, Marcel Dominik},
  booktitle    = {Proceedings of the 1st ICAPS Workshop on Hierarchical Planning},
  location     = {Delft, Netherlands},
  pages        = {31--39},
  publisher    = {AAAI},
  title        = {{Programmatic Task Network Planning}},
  year         = {2018},
}

@inproceedings{2471,
  author       = {Mohr, Felix and Wever, Marcel Dominik and Hüllermeier, Eyke},
  booktitle    = {SCC},
  location     = {San Francisco, CA, USA},
  publisher    = {IEEE Computer Society},
  title        = {{On-The-Fly Service Construction with Prototypes}},
  doi          = {10.1109/SCC.2018.00036},
  year         = {2018},
}

@article{3510,
  abstract     = {Automated machine learning (AutoML) seeks to automatically select, compose, and parametrize machine learning algorithms, so as to achieve optimal performance on a given task (dataset). Although current approaches to AutoML have already produced impressive results, the field is still far from mature, and new techniques are still being developed. In this paper, we present ML-Plan, a new approach to AutoML based on hierarchical planning. To highlight the potential of this approach, we compare ML-Plan to the state-of-the-art frameworks Auto-WEKA, auto-sklearn, and TPOT. In an extensive series of experiments, we show that ML-Plan is highly competitive and often outperforms existing approaches.},
  author       = {Mohr, Felix and Wever, Marcel Dominik and Hüllermeier, Eyke},
  issn         = {1573-0565},
  journal      = {Machine Learning},
  keywords     = {AutoML, Hierarchical Planning, HTN planning, ML-Plan},
  location     = {Dublin, Ireland},
  pages        = {1495--1515},
  publisher    = {Springer},
  title        = {{ML-Plan: Automated Machine Learning via Hierarchical Planning}},
  doi          = {10.1007/s10994-018-5735-z},
  year         = {2018},
}

@inproceedings{3552,
  author       = {Mohr, Felix and Wever, Marcel Dominik and Hüllermeier, Eyke},
  booktitle    = {Proceedings of the Symposium on Intelligent Data Analysis},
  location     = {'s-Hertogenbosch, the Netherlands},
  title        = {{Reduction Stumps for Multi-Class Classification}},
  doi          = {10.1007/978-3-030-01768-2_19},
  year         = {2018},
}

@inproceedings{3852,
  abstract     = {In automated machine learning (AutoML), the process of engineering machine learning applications with respect to a specific problem is (partially) automated.
Various AutoML tools have already been introduced to provide out-of-the-box machine learning functionality.
More specifically, by selecting machine learning algorithms and optimizing their hyperparameters, these tools produce a machine learning pipeline tailored to the problem at hand.
Except for TPOT, all of these tools restrict the maximum number of processing steps of such a pipeline.
However, as TPOT follows an evolutionary approach, it suffers from performance issues when dealing with larger datasets.
In this paper, we present an alternative approach leveraging a hierarchical planning to configure machine learning pipelines that are unlimited in length.
We evaluate our approach and find its performance to be competitive with other AutoML tools, including TPOT.},
  author       = {Wever, Marcel Dominik and Mohr, Felix and Hüllermeier, Eyke},
  booktitle    = {ICML 2018 AutoML Workshop},
  keywords     = {automated machine learning, complex pipelines, hierarchical planning},
  location     = {Stockholm, Sweden},
  title        = {{ML-Plan for Unlimited-Length Machine Learning Pipelines}},
  year         = {2018},
}

@inproceedings{2109,
  abstract     = {In multinomial classification, reduction techniques are commonly used to decompose the original learning problem into several simpler problems. For example, by recursively bisecting the original set of classes, so-called nested dichotomies define a set of binary classification problems that are organized in the structure of a binary tree. In contrast to the existing one-shot heuristics for constructing nested dichotomies and motivated by recent work on algorithm configuration, we propose a genetic algorithm for optimizing the structure of such dichotomies. A key component of this approach is the proposed genetic representation that facilitates the application of standard genetic operators, while still supporting the exchange of partial solutions under recombination. We evaluate the approach in an extensive experimental study, showing that it yields classifiers with superior generalization performance.},
  author       = {Wever, Marcel Dominik and Mohr, Felix and Hüllermeier, Eyke},
  booktitle    = {Proceedings of the Genetic and Evolutionary Computation Conference, GECCO 2018, Kyoto, Japan, July 15-19, 2018},
  keywords     = {Classification, Hierarchical Decomposition, Indirect Encoding},
  location     = {Kyoto, Japan},
  publisher    = {ACM},
  title        = {{Ensembles of Evolved Nested Dichotomies for Classification}},
  doi          = {10.1145/3205455.3205562},
  year         = {2018},
}

@unpublished{17713,
  author       = {Wever, Marcel Dominik and Mohr, Felix and Hüllermeier, Eyke},
  note         = {arXiv preprint},
  title        = {{Automated Multi-Label Classification based on ML-Plan}},
  year         = {2018},
}

