@unpublished{18018,
abstract = {A common statistical task lies in showing asymptotic normality of certain
statistics. In many of these situations, classical textbook results on weak
convergence theory suffice for the problem at hand. However, there are quite
some scenarios where stronger results are needed in order to establish an
asymptotic normal approximation uniformly over a family of probability
measures. In this note we collect some results in this direction. We restrict
ourselves to weak convergence in $\mathbb R^d$ with continuous limit measures.},
author = {Bengs, Viktor and Holzmann, Hajo},
note = {arXiv:1903.09864},
eprint = {1903.09864},
archiveprefix = {arXiv},
title = {{Uniform approximation in classical weak convergence theory}},
year = {2019},
}
@unpublished{19523,
abstract = {We study the problem of learning choice functions, which play an important
role in various domains of application, most notably in the field of economics.
Formally, a choice function is a mapping from sets to sets: Given a set of
choice alternatives as input, a choice function identifies a subset of most
preferred elements. Learning choice functions from suitable training data comes
with a number of challenges. For example, the sets provided as input and the
subsets produced as output can be of any size. Moreover, since the order in
which alternatives are presented is irrelevant, a choice function should be
symmetric. Perhaps most importantly, choice functions are naturally
context-dependent, in the sense that the preference in favor of an alternative
may depend on what other options are available. We formalize the problem of
learning choice functions and present two general approaches based on two
representations of context-dependent utility functions. Both approaches are
instantiated by means of appropriate neural network architectures, and their
performance is demonstrated on suitable benchmark tasks.},
author = {Pfannschmidt, Karlson and Gupta, Pritha and Hüllermeier, Eyke},
note = {arXiv:1901.10860},
eprint = {1901.10860},
archiveprefix = {arXiv},
title = {{Learning Choice Functions: Concepts and Architectures}},
year = {2019},
}
@inproceedings{15003,
author = {Mortier, Thomas and Wydmuch, Marek and Dembczynski, Krzysztof and Hüllermeier, Eyke and Waegeman, Willem},
booktitle = {Proceedings of the 31st Benelux Conference on Artificial Intelligence ({BNAIC} 2019) and the 28th Belgian Dutch Conference on Machine Learning (Benelearn 2019), Brussels, Belgium, November 6-8, 2019},
title = {{Set-Valued Prediction in Multi-Class Classification}},
year = {2019},
}
@article{15015,
author = {Henzgen, Sascha and Hüllermeier, Eyke},
issn = {1556-4681},
journal = {ACM Transactions on Knowledge Discovery from Data},
pages = {1--36},
title = {{Mining Rank Data}},
doi = {10.1145/3363572},
year = {2019},
}
@inproceedings{10232,
abstract = {Existing tools for automated machine learning, such as Auto-WEKA, TPOT, auto-sklearn, and more recently ML-Plan, have shown impressive results for the tasks of single-label classification and regression. Yet, there is only little work on other types of machine learning problems so far. In particular, there is almost no work on automating the engineering of machine learning solutions for multi-label classification (MLC). We show how the scope of ML-Plan, an AutoML-tool for multi-class classification, can be extended towards MLC using MEKA, which is a multi-label extension of the well-known Java library WEKA. The resulting approach recursively refines MEKA's multi-label classifiers, nesting other multi-label classifiers for meta algorithms and single-label classifiers provided by WEKA as base learners. In our evaluation, we find that the proposed approach yields strong results and performs significantly better than a set of baselines we compare with.},
author = {Wever, Marcel Dominik and Mohr, Felix and Tornede, Alexander and Hüllermeier, Eyke},
location = {Long Beach, CA, USA},
title = {{Automating Multi-Label Classification Extending ML-Plan}},
year = {2019},
}
@article{10578,
author = {Tagne, V. K. and Fotso, S. and Fono, L. A. and Hüllermeier, Eyke},
journal = {New Mathematics and Natural Computation},
number = {2},
pages = {191--213},
title = {{Choice Functions Generated by Mallows and Plackett-Luce Relations}},
volume = {15},
year = {2019},
}
@inbook{15004,
author = {Ahmadi Fahandar, Mohsen and Hüllermeier, Eyke},
booktitle = {Discovery Science},
isbn = {9783030337773},
issn = {0302-9743},
title = {{Feature Selection for Analogy-Based Learning to Rank}},
doi = {10.1007/978-3-030-33778-0_22},
year = {2019},
}
@inproceedings{15009,
author = {Epple, Nico and Dari, Simone and Drees, Ludwig and Protschky, Valentin and Riener, Andreas},
booktitle = {2019 IEEE Intelligent Vehicles Symposium (IV)},
isbn = {9781728105604},
title = {{Influence of Cruise Control on Driver Guidance - a Comparison between System Generations and Countries}},
doi = {10.1109/ivs.2019.8814100},
year = {2019},
}
@inproceedings{15011,
author = {Tornede, Alexander and Wever, Marcel Dominik and Hüllermeier, Eyke},
booktitle = {Proceedings - 29. Workshop Computational Intelligence, Dortmund, 28. - 29. November 2019},
editor = {Hoffmann, Frank and Hüllermeier, Eyke and Mikut, Ralf},
isbn = {978-3-7315-0979-0},
location = {Dortmund},
pages = {135--146},
publisher = {KIT Scientific Publishing, Karlsruhe},
title = {{Algorithm Selection as Recommendation: From Collaborative Filtering to Dyad Ranking}},
year = {2019},
}
@inbook{15005,
author = {Ahmadi Fahandar, Mohsen and Hüllermeier, Eyke},
booktitle = {KI 2019: Advances in Artificial Intelligence},
isbn = {9783030301781},
issn = {0302-9743},
title = {{Analogy-Based Preference Learning with Kernels}},
doi = {10.1007/978-3-030-30179-8_3},
year = {2019},
}
@unpublished{18016,
abstract = {In this paper, we introduce the Preselection Bandit problem, in which the
learner preselects a subset of arms (choice alternatives) for a user, which
then chooses the final arm from this subset. The learner is not aware of the
user's preferences, but can learn them from observed choices. In our concrete
setting, we allow these choices to be stochastic and model the user's actions
by means of the Plackett-Luce model. The learner's main task is to preselect
subsets that eventually lead to highly preferred choices. To formalize this
goal, we introduce a reasonable notion of regret and derive lower bounds on the
expected regret. Moreover, we propose algorithms for which the upper bound on
expected regret matches the lower bound up to a logarithmic term of the time
horizon.},
author = {Bengs, Viktor and Hüllermeier, Eyke},
note = {arXiv:1907.06123},
eprint = {1907.06123},
archiveprefix = {arXiv},
title = {{Preselection Bandits under the Plackett-Luce Model}},
year = {2019},
}
@inproceedings{13132,
author = {Mohr, Felix and Wever, Marcel Dominik and Tornede, Alexander and Hüllermeier, Eyke},
booktitle = {INFORMATIK 2019: 50 Jahre Gesellschaft für Informatik – Informatik für Gesellschaft},
location = {Kassel},
pages = {273--274},
publisher = {Gesellschaft für Informatik e.V.},
title = {{From Automated to On-The-Fly Machine Learning}},
year = {2019},
}
@article{14027,
author = {Bengs, Viktor and Eulert, Matthias and Holzmann, Hajo},
issn = {0047-259X},
journal = {Journal of Multivariate Analysis},
pages = {291--312},
title = {{Asymptotic confidence sets for the jump curve in bivariate regression problems}},
doi = {10.1016/j.jmva.2019.02.017},
year = {2019},
}
@article{15001,
author = {Couso, Ines and Borgelt, Christian and Hüllermeier, Eyke and Kruse, Rudolf},
issn = {1556-603X},
journal = {IEEE Computational Intelligence Magazine},
pages = {31--44},
title = {{Fuzzy Sets in Data Analysis: From Statistical Foundations to Machine Learning}},
doi = {10.1109/mci.2018.2881642},
year = {2019},
}
@inbook{15006,
author = {Nguyen, Vu-Linh and Destercke, Sébastien and Hüllermeier, Eyke},
booktitle = {Discovery Science},
isbn = {9783030337773},
issn = {0302-9743},
title = {{Epistemic Uncertainty Sampling}},
doi = {10.1007/978-3-030-33778-0_7},
year = {2019},
}
@inproceedings{15013,
author = {Brinker, Klaus and Hüllermeier, Eyke},
booktitle = {Proceedings ECML/PKDD, European Conference on Machine Learning and Knowledge Discovery in Databases},
title = {{A Reduction of Label Ranking to Multiclass Classification}},
year = {2019},
}
@inproceedings{8868,
author = {Wever, Marcel Dominik and Mohr, Felix and Hüllermeier, Eyke and Hetzer, Alexander},
location = {Bayreuth, Germany},
title = {{Towards Automated Machine Learning for Multi-Label Classification}},
year = {2019},
}
@article{15025,
abstract = {In software engineering, the imprecise requirements of a user are transformed to a formal requirements specification during the requirements elicitation process. This process is usually guided by requirements engineers interviewing the user. We want to partially automate this first step of the software engineering process in order to enable users to specify a desired software system on their own. With our approach, users are only asked to provide exemplary behavioral descriptions. The problem of synthesizing a requirements specification from examples can partially be reduced to the problem of grammatical inference, to which we apply an active coevolutionary learning approach. However, this approach would usually require many feedback queries to be sent to the user. In this work, we extend and generalize our active learning approach to receive knowledge from multiple oracles, also known as proactive learning. The ‘user oracle’ represents input received from the user and the ‘knowledge oracle’ represents available, formalized domain knowledge. We call our two-oracle approach the ‘first apply knowledge then query’ (FAKT/Q) algorithm. We compare FAKT/Q to the active learning approach and provide an extensive benchmark evaluation. As result we find that the number of required user queries is reduced and the inference process is sped up significantly. Finally, with so-called On-The-Fly Markets, we present a motivation and an application of our approach where such knowledge is available.},
author = {Wever, Marcel Dominik and van Rooijen, Lorijn and Hamann, Heiko},
journal = {Evolutionary Computation},
publisher = {MIT Press Journals},
title = {{Multi-Oracle Coevolutionary Learning of Requirements Specifications from Examples in On-The-Fly Markets}},
doi = {10.1162/evco_a_00266},
year = {2019},
}
@inproceedings{10184,
author = {Schäfer, Dirk and Hüllermeier, Eyke},
booktitle = {Proc. 21st Int. Conference on Discovery Science (DS)},
pages = {161--175},
title = {{Preference-Based Reinforcement Learning Using Dyad Ranking}},
year = {2018},
}
@inproceedings{2479,
author = {Mohr, Felix and Wever, Marcel Dominik and Hüllermeier, Eyke and Faez, Amin},
booktitle = {SCC},
location = {San Francisco, CA, USA},
publisher = {IEEE},
title = {{(WIP) Towards the Automated Composition of Machine Learning Services}},
doi = {10.1109/SCC.2018.00039},
year = {2018},
}