@phdthesis{45780, author = {{Tornede, Alexander}}, title = {{{Advanced Algorithm Selection with Machine Learning: Handling Large Algorithm Sets, Learning From Censored Data, and Simplifying Meta Level Decisions}}}, doi = {{10.17619/UNIPB/1-1780}}, year = {{2023}}, } @article{21600, abstract = {{Many problems in science and engineering require an efficient numerical approximation of integrals or solutions to differential equations. For systems with rapidly changing dynamics, an equidistant discretization is often inadvisable as it results in prohibitively large errors or computational effort. To this end, adaptive schemes, such as solvers based on Runge–Kutta pairs, have been developed which adapt the step size based on local error estimations at each step. While the classical schemes apply very generally and are highly efficient on regular systems, they can behave suboptimally when an inefficient step rejection mechanism is triggered by structurally complex systems such as chaotic systems. To overcome these issues, we propose a method to tailor numerical schemes to the problem class at hand. This is achieved by combining simple, classical quadrature rules or ODE solvers with data-driven time-stepping controllers. Compared with learning solution operators to ODEs directly, it generalizes better to unseen initial data as our approach employs classical numerical schemes as base methods. At the same time it can make use of identified structures of a problem class and, therefore, outperforms state-of-the-art adaptive schemes. Several examples demonstrate superior efficiency. 
Source code is available at https://github.com/lueckem/quadrature-ML.}}, author = {{Dellnitz, Michael and Hüllermeier, Eyke and Lücke, Marvin and Ober-Blöbaum, Sina and Offen, Christian and Peitz, Sebastian and Pfannschmidt, Karlson}}, journal = {{SIAM Journal on Scientific Computing}}, number = {{2}}, pages = {{A579--A595}}, title = {{{Efficient time stepping for numerical integration using reinforcement learning}}}, doi = {{10.1137/21M1412682}}, volume = {{45}}, year = {{2023}}, } @inproceedings{24382, author = {{Gevers, Karina and Schöppner, Volker and Hüllermeier, Eyke}}, location = {{online}}, title = {{{Heated tool butt welding of two different materials – Established methods versus artificial intelligence}}}, year = {{2021}}, } @article{21004, abstract = {{Automated machine learning (AutoML) supports the algorithmic construction and data-specific customization of machine learning pipelines, including the selection, combination, and parametrization of machine learning algorithms as main constituents. Generally speaking, AutoML approaches comprise two major components: a search space model and an optimizer for traversing the space. Recent approaches have shown impressive results in the realm of supervised learning, most notably (single-label) classification (SLC). Moreover, first attempts at extending these approaches towards multi-label classification (MLC) have been made. While the space of candidate pipelines is already huge in SLC, the complexity of the search space is raised to an even higher power in MLC. One may wonder, therefore, whether and to what extent optimizers established for SLC can scale to this increased complexity, and how they compare to each other. This paper makes the following contributions: First, we survey existing approaches to AutoML for MLC. Second, we augment these approaches with optimizers not previously tried for MLC. Third, we propose a benchmarking framework that supports a fair and systematic comparison. 
Fourth, we conduct an extensive experimental study, evaluating the methods on a suite of MLC problems. We find a grammar-based best-first search to compare favorably to other optimizers.}}, author = {{Wever, Marcel Dominik and Tornede, Alexander and Mohr, Felix and Hüllermeier, Eyke}}, issn = {{0162-8828}}, journal = {{IEEE Transactions on Pattern Analysis and Machine Intelligence}}, keywords = {{Automated Machine Learning, Multi Label Classification, Hierarchical Planning, Bayesian Optimization}}, pages = {{1--1}}, title = {{{AutoML for Multi-Label Classification: Overview and Empirical Evaluation}}}, doi = {{10.1109/tpami.2021.3051276}}, year = {{2021}}, } @article{21092, abstract = {{Automated Machine Learning (AutoML) seeks to automatically find so-called machine learning pipelines that maximize the prediction performance when being used to train a model on a given dataset. One of the main and yet open challenges in AutoML is an effective use of computational resources: An AutoML process involves the evaluation of many candidate pipelines, which are costly but often ineffective because they are canceled due to a timeout. In this paper, we present an approach to predict the runtime of two-step machine learning pipelines with up to one pre-processor, which can be used to anticipate whether or not a pipeline will time out. Separate runtime models are trained offline for each algorithm that may be used in a pipeline, and an overall prediction is derived from these models. 
We empirically show that the approach increases successful evaluations made by an AutoML tool while preserving or even improving on the previously best solutions.}}, author = {{Mohr, Felix and Wever, Marcel Dominik and Tornede, Alexander and Hüllermeier, Eyke}}, journal = {{IEEE Transactions on Pattern Analysis and Machine Intelligence}}, publisher = {{IEEE}}, title = {{{Predicting Machine Learning Pipeline Runtimes in the Context of Automated Machine Learning}}}, year = {{2021}}, } @article{21535, author = {{Bengs, Viktor and Busa-Fekete, Róbert and El Mesaoudi-Paul, Adil and Hüllermeier, Eyke}}, journal = {{Journal of Machine Learning Research}}, number = {{7}}, pages = {{1--108}}, title = {{{Preference-based Online Learning with Dueling Bandits: A Survey}}}, volume = {{22}}, year = {{2021}}, } @inproceedings{21570, author = {{Tornede, Tanja and Tornede, Alexander and Wever, Marcel Dominik and Hüllermeier, Eyke}}, booktitle = {{Proceedings of the Genetic and Evolutionary Computation Conference}}, title = {{{Coevolution of Remaining Useful Lifetime Estimation Pipelines for Automated Predictive Maintenance}}}, year = {{2021}}, } @inproceedings{23779, abstract = {{Produktentstehung (PE) bezieht sich auf den Prozess der Planung und Entwicklung eines Produkts sowie der damit verbundenen Dienstleistungen von der ersten Idee bis zur Herstellung und zum Vertrieb. Während dieses Prozesses gibt es zahlreiche Aufgaben, die von menschlichem Fachwissen abhängen und typischerweise von erfahrenen Experten übernommen werden. Da sich das Feld der Künstlichen Intelligenz (KI) immer weiterentwickelt und seinen Weg in den Fertigungssektor findet, gibt es viele Möglichkeiten für eine Anwendung von KI, um bei der Lösung der oben genannten Aufgaben zu helfen. In diesem Paper geben wir einen umfassenden Überblick über den aktuellen Stand der Technik des Einsatzes von KI in der PE. 
Im Detail analysieren wir 40 bestehende Surveys zu KI in der PE und 94 Case Studies, um herauszufinden, welche Bereiche der PE von der aktuellen Forschung in diesem Bereich vorrangig adressiert werden, wie ausgereift die diskutierten KI-Methoden sind und inwieweit datenzentrierte Ansätze in der aktuellen Forschung genutzt werden.}}, author = {{Bernijazov, Ruslan and Dicks, Alexander and Dumitrescu, Roman and Foullois, Marc and Hanselle, Jonas Manuel and Hüllermeier, Eyke and Karakaya, Gökce and Ködding, Patrick and Lohweg, Volker and Malatyali, Manuel and Meyer auf der Heide, Friedhelm and Panzner, Melina and Soltenborn, Christian}}, booktitle = {{Proceedings of the 30th International Joint Conference on Artificial Intelligence (IJCAI-21)}}, keywords = {{Artificial Intelligence Product Creation Literature Review}}, location = {{Montreal, Kanada}}, title = {{{A Meta-Review on Artificial Intelligence in Product Creation}}}, year = {{2021}}, } @inproceedings{22913, author = {{Hüllermeier, Eyke and Mohr, Felix and Tornede, Alexander and Wever, Marcel Dominik}}, location = {{Bilbao (Virtual)}}, title = {{{Automated Machine Learning, Bounded Rationality, and Rational Metareasoning}}}, year = {{2021}}, } @inproceedings{22914, author = {{Mohr, Felix and Wever, Marcel Dominik}}, location = {{Virtual}}, title = {{{Replacing the Ex-Def Baseline in AutoML by Naive AutoML}}}, year = {{2021}}, } @inproceedings{27381, abstract = {{Graph neural networks (GNNs) have been successfully applied in many structured data domains, with applications ranging from molecular property prediction to the analysis of social networks. Motivated by the broad applicability of GNNs, we propose the family of so-called RankGNNs, a combination of neural Learning to Rank (LtR) methods and GNNs. RankGNNs are trained with a set of pair-wise preferences between graphs, suggesting that one of them is preferred over the other. 
One practical application of this problem is drug screening, where an expert wants to find the most promising molecules in a large collection of drug candidates. We empirically demonstrate that our proposed pair-wise RankGNN approach either significantly outperforms or at least matches the ranking performance of the naive point-wise baseline approach, in which the LtR problem is solved via GNN-based graph regression.}}, author = {{Damke, Clemens and Hüllermeier, Eyke}}, booktitle = {{Proceedings of The 24th International Conference on Discovery Science (DS 2021)}}, editor = {{Soares, Carlos and Torgo, Luis}}, isbn = {{9783030889418}}, issn = {{0302-9743}}, keywords = {{Graph-structured data, Graph neural networks, Preference learning, Learning to rank}}, location = {{Halifax, Canada}}, pages = {{166--180}}, publisher = {{Springer}}, title = {{{Ranking Structured Objects with Graph Neural Networks}}}, doi = {{10.1007/978-3-030-88942-5}}, volume = {{12986}}, year = {{2021}}, } @phdthesis{27284, author = {{Wever, Marcel Dominik}}, title = {{{Automated Machine Learning for Multi-Label Classification}}}, doi = {{10.17619/UNIPB/1-1302}}, year = {{2021}}, } @inproceedings{21198, author = {{Hanselle, Jonas Manuel and Tornede, Alexander and Wever, Marcel Dominik and Hüllermeier, Eyke}}, location = {{Delhi, India}}, title = {{{Algorithm Selection as Superset Learning: Constructing Algorithm Selectors from Imprecise Performance Data}}}, year = {{2021}}, } @inbook{19521, author = {{Pfannschmidt, Karlson and Hüllermeier, Eyke}}, booktitle = {{Lecture Notes in Computer Science}}, isbn = {{9783030582845}}, issn = {{0302-9743}}, title = {{{Learning Choice Functions via Pareto-Embeddings}}}, doi = {{10.1007/978-3-030-58285-2_30}}, year = {{2020}}, } @inproceedings{19953, abstract = {{Current GNN architectures use a vertex neighborhood aggregation scheme, which limits their discriminative power to that of the 1-dimensional Weisfeiler-Lehman (WL) graph isomorphism test. 
Here, we propose a novel graph convolution operator that is based on the 2-dimensional WL test. We formally show that the resulting 2-WL-GNN architecture is more discriminative than existing GNN approaches. This theoretical result is complemented by experimental studies using synthetic and real data. On multiple common graph classification benchmarks, we demonstrate that the proposed model is competitive with state-of-the-art graph kernels and GNNs.}}, author = {{Damke, Clemens and Melnikov, Vitaly and Hüllermeier, Eyke}}, booktitle = {{Proceedings of the 12th Asian Conference on Machine Learning (ACML 2020)}}, editor = {{Jialin Pan, Sinno and Sugiyama, Masashi}}, keywords = {{graph neural networks, Weisfeiler-Lehman test, cycle detection}}, location = {{Bangkok, Thailand}}, pages = {{49--64}}, publisher = {{PMLR}}, title = {{{A Novel Higher-order Weisfeiler-Lehman Graph Convolution}}}, volume = {{129}}, year = {{2020}}, } @inproceedings{21534, author = {{Bengs, Viktor and Hüllermeier, Eyke}}, booktitle = {{International Conference on Machine Learning}}, pages = {{778--787}}, title = {{{Preselection Bandits}}}, year = {{2020}}, } @unpublished{21536, abstract = {{We consider a resource-aware variant of the classical multi-armed bandit problem: In each round, the learner selects an arm and determines a resource limit. It then observes a corresponding (random) reward, provided the (random) amount of consumed resources remains below the limit. Otherwise, the observation is censored, i.e., no reward is obtained. For this problem setting, we introduce a measure of regret, which incorporates the actual amount of allocated resources of each learning round as well as the optimality of realizable rewards. Thus, to minimize regret, the learner needs to set a resource limit and choose an arm in such a way that the chance to realize a high reward within the predefined resource limit is high, while the resource limit itself should be kept as low as possible. 
We derive the theoretical lower bound on the cumulative regret and propose a learning algorithm having a regret upper bound that matches the lower bound. In a simulation study, we show that our learning algorithm outperforms straightforward extensions of standard multi-armed bandit algorithms.}}, author = {{Bengs, Viktor and Hüllermeier, Eyke}}, note = {{arXiv:2011.00813}}, title = {{{Multi-Armed Bandits with Censored Consumption of Resources}}}, year = {{2020}}, } @inproceedings{17407, author = {{Tornede, Alexander and Wever, Marcel Dominik and Hüllermeier, Eyke}}, booktitle = {{Discovery Science}}, title = {{{Extreme Algorithm Selection with Dyadic Feature Representation}}}, year = {{2020}}, } @inproceedings{17408, author = {{Hanselle, Jonas Manuel and Tornede, Alexander and Wever, Marcel Dominik and Hüllermeier, Eyke}}, booktitle = {{KI 2020: Advances in Artificial Intelligence}}, title = {{{Hybrid Ranking and Regression for Algorithm Selection}}}, year = {{2020}}, } @inproceedings{17424, author = {{Tornede, Tanja and Tornede, Alexander and Wever, Marcel Dominik and Mohr, Felix and Hüllermeier, Eyke}}, booktitle = {{Proceedings of the ECMLPKDD 2020}}, title = {{{AutoML for Predictive Maintenance: One Tool to RUL Them All}}}, doi = {{10.1007/978-3-030-66770-2_8}}, year = {{2020}}, }