@article{16725,
author = {Richter, Cedric and Hüllermeier, Eyke and Jakobs, Marie-Christine and Wehrheim, Heike},
internal-note = {NOTE(review): journal is usually cited as "Automated Software Engineering" (Springer), not "Journal of Automated Software Engineering"; volume, pages, and doi are also missing for this @article -- verify against the published version},
journal = {Journal of Automated Software Engineering},
publisher = {Springer},
title = {{Algorithm Selection for Software Validation Based on Graph Kernels}},
year = {2020},
}
@inproceedings{18276,
  author = {Tornede, Alexander and Wever, Marcel Dominik and Werner, Stefan and Mohr, Felix and Hüllermeier, Eyke},
  title = {{Run2Survive: A Decision-theoretic Approach to Algorithm Selection based on Survival Analysis}},
  booktitle = {ACML 2020},
  location = {Bangkok, Thailand},
  year = {2020},
  abstract = {Algorithm selection (AS) deals with the automatic selection of an algorithm
from a fixed set of candidate algorithms most suitable for a specific instance
of an algorithmic problem class, where "suitability" often refers to an
algorithm's runtime. Due to possibly extremely long runtimes of candidate
algorithms, training data for algorithm selection models is usually generated
under time constraints in the sense that not all algorithms are run to
completion on all instances. Thus, training data usually comprises censored
information, as the true runtime of algorithms timed out remains unknown.
However, many standard AS approaches are not able to handle such information in
a proper way. On the other side, survival analysis (SA) naturally supports
censored data and offers appropriate ways to use such data for learning
distributional models of algorithm runtime, as we demonstrate in this work. We
leverage such models as a basis of a sophisticated decision-theoretic approach
to algorithm selection, which we dub Run2Survive. Moreover, taking advantage of
a framework of this kind, we advocate a risk-averse approach to algorithm
selection, in which the avoidance of a timeout is given high priority. In an
extensive experimental study with the standard benchmark ASlib, our approach is
shown to be highly competitive and in many cases even superior to
state-of-the-art AS approaches.},
}
@unpublished{18017,
  author = {El Mesaoudi-Paul, Adil and Bengs, Viktor and Hüllermeier, Eyke},
  title = {{Online Preselection with Context Information under the Plackett-Luce Model}},
  eprint = {2002.04275},
  eprinttype = {arXiv},
  note = {arXiv:2002.04275},
  year = {2020},
  abstract = {We consider an extension of the contextual multi-armed bandit problem, in
which, instead of selecting a single alternative (arm), a learner is supposed
to make a preselection in the form of a subset of alternatives. More
specifically, in each iteration, the learner is presented a set of arms and a
context, both described in terms of feature vectors. The task of the learner is
to preselect $k$ of these arms, among which a final choice is made in a second
step. In our setup, we assume that each arm has a latent (context-dependent)
utility, and that feedback on a preselection is produced according to a
Plackett-Luce model. We propose the CPPL algorithm, which is inspired by the
well-known UCB algorithm, and evaluate this algorithm on synthetic and real
data. In particular, we consider an online algorithm selection scenario, which
served as a main motivation of our problem setting. Here, an instance (which
defines the context) from a certain problem class (such as SAT) can be solved
by different algorithms (the arms), but only $k$ of these algorithms can
actually be run.},
}
@inproceedings{21534,
  author = {Bengs, Viktor and Hüllermeier, Eyke},
  title = {{Preselection Bandits}},
  booktitle = {International Conference on Machine Learning},
  pages = {778--787},
  year = {2020},
}
@article{14028,
  author = {Bengs, Viktor and Holzmann, Hajo},
  title = {{Adaptive confidence sets for kink estimation}},
  journal = {Electronic Journal of Statistics},
  pages = {1523--1579},
  issn = {1935-7524},
  doi = {10.1214/19-ejs1555},
  year = {2019},
}
@article{15002,
  author = {Waegeman, Willem and Dembczynski, Krzysztof and Hüllermeier, Eyke},
  title = {{Multi-target prediction: a unifying view on problems and methods}},
  journal = {Data Mining and Knowledge Discovery},
  volume = {33},
  number = {2},
  pages = {293--324},
  issn = {1573-756X},
  doi = {10.1007/s10618-018-0595-5},
  year = {2019},
  abstract = {Many problem settings in machine learning are concerned with the simultaneous prediction of multiple target variables of diverse type. Amongst others, such problem settings arise in multivariate regression, multi-label classification, multi-task learning, dyadic prediction, zero-shot learning, network inference, and matrix completion. These subfields of machine learning are typically studied in isolation, without highlighting or exploring important relationships. In this paper, we present a unifying view on what we call multi-target prediction (MTP) problems and methods. First, we formally discuss commonalities and differences between existing MTP problems. To this end, we introduce a general framework that covers the above subfields as special cases. As a second contribution, we provide a structured overview of MTP methods. This is accomplished by identifying a number of key properties, which distinguish such methods and determine their suitability for different types of problems. Finally, we also discuss a few challenges for future research.},
}
@inproceedings{15014,
  author = {Hüllermeier, Eyke and Couso, Ines and Destercke, Sébastien},
  title = {{Learning from Imprecise Data: Adjustments of Optimistic and Pessimistic Variants}},
  booktitle = {Proceedings SUM 2019, International Conference on Scalable Uncertainty Management},
  year = {2019},
}
@inproceedings{15007,
  author = {Melnikov, Vitaly and Hüllermeier, Eyke},
  title = {{Learning to Aggregate: Tackling the Aggregation/Disaggregation Problem for OWA}},
  booktitle = {Proceedings ACML, Asian Conference on Machine Learning (Proceedings of Machine Learning Research, 101)},
  internal-note = {DOI 10.1016/j.jmva.2019.02.017 removed: it is the DOI of the Journal of Multivariate Analysis article in entry 14027 and was copy-pasted here by mistake},
  year = {2019},
}
@article{17565,
  author = {Merten, Marie-Luis and Seemann, Nina and Wever, Marcel Dominik},
  title = {{Grammatikwandel digital-kulturwissenschaftlich erforscht. Mittelniederdeutscher Sprachausbau im interdisziplinären Zugriff}},
  journal = {Niederdeutsches Jahrbuch},
  number = {142},
  pages = {124--146},
  year = {2019},
}
@unpublished{18018,
  author = {Bengs, Viktor and Holzmann, Hajo},
  title = {{Uniform approximation in classical weak convergence theory}},
  eprint = {1903.09864},
  eprinttype = {arXiv},
  note = {arXiv:1903.09864},
  year = {2019},
  abstract = {A common statistical task lies in showing asymptotic normality of certain
statistics. In many of these situations, classical textbook results on weak
convergence theory suffice for the problem at hand. However, there are quite
some scenarios where stronger results are needed in order to establish an
asymptotic normal approximation uniformly over a family of probability
measures. In this note we collect some results in this direction. We restrict
ourselves to weak convergence in $\mathbb R^d$ with continuous limit measures.},
}
@unpublished{19523,
  author = {Pfannschmidt, Karlson and Gupta, Pritha and Hüllermeier, Eyke},
  title = {{Learning Choice Functions: Concepts and Architectures}},
  eprint = {1901.10860},
  eprinttype = {arXiv},
  note = {arXiv:1901.10860},
  year = {2019},
  abstract = {We study the problem of learning choice functions, which play an important
role in various domains of application, most notably in the field of economics.
Formally, a choice function is a mapping from sets to sets: Given a set of
choice alternatives as input, a choice function identifies a subset of most
preferred elements. Learning choice functions from suitable training data comes
with a number of challenges. For example, the sets provided as input and the
subsets produced as output can be of any size. Moreover, since the order in
which alternatives are presented is irrelevant, a choice function should be
symmetric. Perhaps most importantly, choice functions are naturally
context-dependent, in the sense that the preference in favor of an alternative
may depend on what other options are available. We formalize the problem of
learning choice functions and present two general approaches based on two
representations of context-dependent utility functions. Both approaches are
instantiated by means of appropriate neural network architectures, and their
performance is demonstrated on suitable benchmark tasks.},
}
@inproceedings{15003,
  author = {Mortier, Thomas and Wydmuch, Marek and Dembczynski, Krzysztof and Hüllermeier, Eyke and Waegeman, Willem},
  title = {{Set-Valued Prediction in Multi-Class Classification}},
  booktitle = {Proceedings of the 31st Benelux Conference on Artificial Intelligence ({BNAIC} 2019) and the 28th Belgian Dutch Conference on Machine Learning (Benelearn 2019), Brussels, Belgium, November 6-8, 2019},
  year = {2019},
}
@article{15015,
  author = {Henzgen, Sascha and Hüllermeier, Eyke},
  title = {{Mining Rank Data}},
  journal = {ACM Transactions on Knowledge Discovery from Data},
  pages = {1--36},
  issn = {1556-4681},
  doi = {10.1145/3363572},
  year = {2019},
}
@inproceedings{10232,
abstract = {Existing tools for automated machine learning, such as Auto-WEKA, TPOT, auto-sklearn, and more recently ML-Plan, have shown impressive results for the tasks of single-label classification and regression. Yet, there is only little work on other types of machine learning problems so far. In particular, there is almost no work on automating the engineering of machine learning solutions for multi-label classification (MLC). We show how the scope of ML-Plan, an AutoML-tool for multi-class classification, can be extended towards MLC using MEKA, which is a multi-label extension of the well-known Java library WEKA. The resulting approach recursively refines MEKA's multi-label classifiers, nesting other multi-label classifiers for meta algorithms and single-label classifiers provided by WEKA as base learners. In our evaluation, we find that the proposed approach yields strong results and performs significantly better than a set of baselines we compare with.},
author = {Wever, Marcel Dominik and Mohr, Felix and Tornede, Alexander and Hüllermeier, Eyke},
internal-note = {NOTE(review): required booktitle is missing for this @inproceedings; Long Beach, 2019 suggests an ICML 2019 workshop -- verify the venue and add it},
location = {Long Beach, CA, USA},
title = {{Automating Multi-Label Classification Extending ML-Plan}},
year = {2019},
}
@article{10578,
  author = {Tagne, V. K. and Fotso, S. and Fono, L. A. and Hüllermeier, Eyke},
  title = {{Choice Functions Generated by Mallows and Plackett-Luce Relations}},
  journal = {New Mathematics and Natural Computation},
  volume = {15},
  number = {2},
  pages = {191--213},
  year = {2019},
}
@inbook{15004,
  author = {Ahmadi Fahandar, Mohsen and Hüllermeier, Eyke},
  title = {{Feature Selection for Analogy-Based Learning to Rank}},
  booktitle = {Discovery Science},
  isbn = {9783030337773},
  issn = {0302-9743},
  doi = {10.1007/978-3-030-33778-0_22},
  year = {2019},
}
@inproceedings{15009,
  author = {Epple, Nico and Dari, Simone and Drees, Ludwig and Protschky, Valentin and Riener, Andreas},
  title = {{Influence of Cruise Control on Driver Guidance - a Comparison between System Generations and Countries}},
  booktitle = {2019 IEEE Intelligent Vehicles Symposium (IV)},
  isbn = {9781728105604},
  doi = {10.1109/ivs.2019.8814100},
  year = {2019},
}
@inproceedings{15011,
  author = {Tornede, Alexander and Wever, Marcel Dominik and Hüllermeier, Eyke},
  title = {{Algorithm Selection as Recommendation: From Collaborative Filtering to Dyad Ranking}},
  booktitle = {Proceedings - 29. Workshop Computational Intelligence, Dortmund, 28. - 29. November 2019},
  editor = {Hoffmann, Frank and Hüllermeier, Eyke and Mikut, Ralf},
  location = {Dortmund},
  publisher = {KIT Scientific Publishing, Karlsruhe},
  pages = {135--146},
  isbn = {978-3-7315-0979-0},
  year = {2019},
}
@inbook{15005,
  author = {Ahmadi Fahandar, Mohsen and Hüllermeier, Eyke},
  title = {{Analogy-Based Preference Learning with Kernels}},
  booktitle = {KI 2019: Advances in Artificial Intelligence},
  isbn = {9783030301781},
  issn = {0302-9743},
  doi = {10.1007/978-3-030-30179-8_3},
  year = {2019},
}
@inproceedings{13132,
  author = {Mohr, Felix and Wever, Marcel Dominik and Tornede, Alexander and Hüllermeier, Eyke},
  title = {{From Automated to On-The-Fly Machine Learning}},
  booktitle = {INFORMATIK 2019: 50 Jahre Gesellschaft für Informatik – Informatik für Gesellschaft},
  location = {Kassel},
  publisher = {Gesellschaft für Informatik e.V.},
  pages = {273--274},
  year = {2019},
}
@article{14027,
  author = {Bengs, Viktor and Eulert, Matthias and Holzmann, Hajo},
  title = {{Asymptotic confidence sets for the jump curve in bivariate regression problems}},
  journal = {Journal of Multivariate Analysis},
  pages = {291--312},
  issn = {0047-259X},
  doi = {10.1016/j.jmva.2019.02.017},
  year = {2019},
}
@article{15001,
  author = {Couso, Ines and Borgelt, Christian and Hüllermeier, Eyke and Kruse, Rudolf},
  title = {{Fuzzy Sets in Data Analysis: From Statistical Foundations to Machine Learning}},
  journal = {IEEE Computational Intelligence Magazine},
  pages = {31--44},
  issn = {1556-603X},
  doi = {10.1109/mci.2018.2881642},
  year = {2019},
}
@inbook{15006,
  author = {Nguyen, Vu-Linh and Destercke, Sébastien and Hüllermeier, Eyke},
  title = {{Epistemic Uncertainty Sampling}},
  booktitle = {Discovery Science},
  isbn = {9783030337773},
  issn = {0302-9743},
  doi = {10.1007/978-3-030-33778-0_7},
  year = {2019},
}
@inproceedings{15013,
  author = {Brinker, Klaus and Hüllermeier, Eyke},
  title = {{A Reduction of Label Ranking to Multiclass Classification}},
  booktitle = {Proceedings ECML/PKDD, European Conference on Machine Learning and Knowledge Discovery in Databases},
  year = {2019},
}
@inproceedings{8868,
author = {Wever, Marcel Dominik and Mohr, Felix and Hüllermeier, Eyke and Hetzer, Alexander},
internal-note = {NOTE(review): required booktitle is missing for this @inproceedings (Bayreuth, 2019) -- verify the venue and add it},
location = {Bayreuth, Germany},
title = {{Towards Automated Machine Learning for Multi-Label Classification}},
year = {2019},
}
@article{15025,
  author = {Wever, Marcel Dominik and van Rooijen, Lorijn and Hamann, Heiko},
  title = {{Multi-Oracle Coevolutionary Learning of Requirements Specifications from Examples in On-The-Fly Markets}},
  journal = {Evolutionary Computation},
  publisher = {MIT Press Journals},
  doi = {10.1162/evco_a_00266},
  year = {2019},
  abstract = {In software engineering, the imprecise requirements of a user are transformed to a formal requirements specification during the requirements elicitation process. This process is usually guided by requirements engineers interviewing the user. We want to partially automate this first step of the software engineering process in order to enable users to specify a desired software system on their own. With our approach, users are only asked to provide exemplary behavioral descriptions. The problem of synthesizing a requirements specification from examples can partially be reduced to the problem of grammatical inference, to which we apply an active coevolutionary learning approach. However, this approach would usually require many feedback queries to be sent to the user. In this work, we extend and generalize our active learning approach to receive knowledge from multiple oracles, also known as proactive learning. The ‘user oracle’ represents input received from the user and the ‘knowledge oracle’ represents available, formalized domain knowledge. We call our two-oracle approach the ‘first apply knowledge then query’ (FAKT/Q) algorithm. We compare FAKT/Q to the active learning approach and provide an extensive benchmark evaluation. As result we find that the number of required user queries is reduced and the inference process is sped up significantly. Finally, with so-called On-The-Fly Markets, we present a motivation and an application of our approach where such knowledge is available.},
}
@inproceedings{10184,
author = {Schäfer, Dirk and Hüllermeier, Eyke},
booktitle = {Proc. 21st Int. Conference on Discovery Science (DS)},
internal-note = {NOTE(review): apparent duplicate of entry 6423 (@inbook, Discovery Science, same title and pages 161--175; DS 2018 proceedings appeared as that Springer volume) -- consider keeping only one key},
pages = {161--175},
title = {{Preference-Based Reinforcement Learning Using Dyad Ranking}},
year = {2018},
}
@inproceedings{2479,
  author = {Mohr, Felix and Wever, Marcel Dominik and Hüllermeier, Eyke and Faez, Amin},
  title = {{(WIP) Towards the Automated Composition of Machine Learning Services}},
  booktitle = {2018 IEEE International Conference on Services Computing (SCC)},
  location = {San Francisco, CA, USA},
  publisher = {IEEE},
  doi = {10.1109/SCC.2018.00039},
  year = {2018},
}
@inproceedings{3852,
  author = {Wever, Marcel Dominik and Mohr, Felix and Hüllermeier, Eyke},
  title = {{ML-Plan for Unlimited-Length Machine Learning Pipelines}},
  booktitle = {ICML 2018 AutoML Workshop},
  location = {Stockholm, Sweden},
  keyword = {automated machine learning, complex pipelines, hierarchical planning},
  year = {2018},
  abstract = {In automated machine learning (AutoML), the process of engineering machine learning applications with respect to a specific problem is (partially) automated.
Various AutoML tools have already been introduced to provide out-of-the-box machine learning functionality.
More specifically, by selecting machine learning algorithms and optimizing their hyperparameters, these tools produce a machine learning pipeline tailored to the problem at hand.
Except for TPOT, all of these tools restrict the maximum number of processing steps of such a pipeline.
However, as TPOT follows an evolutionary approach, it suffers from performance issues when dealing with larger datasets.
In this paper, we present an alternative approach leveraging a hierarchical planning to configure machine learning pipelines that are unlimited in length.
We evaluate our approach and find its performance to be competitive with other AutoML tools, including TPOT.},
}
@proceedings{10591,
editor = {Abiteboul, S. and Arenas, M. and Barceló, P. and Bienvenu, M. and Calvanese, D. and David, C. and Hull, R. and Hüllermeier, Eyke and Kimelfeld, B. and Libkin, L. and Martens, W. and Milo, T. and Murlak, F. and Neven, F. and Ortiz, M. and Schwentick, T. and Stoyanovich, J. and Su, J. and Suciu, D. and Vianu, V. and Yi, K.},
internal-note = {NOTE(review): pages/number/volume on a @proceedings entry are type-suspect; this looks like the jointly authored article "Research Directions for Principles of Data Management" (presumably Dagstuhl Manifestos 7(1), 1--29), in which case @article with the listed people as authors would be correct -- verify},
number = {1},
pages = {1--29},
title = {{Research Directions for Principles of Data Management}},
volume = {7},
year = {2018},
}
@unpublished{17713,
  author = {Wever, Marcel Dominik and Mohr, Felix and Hüllermeier, Eyke},
  title = {{Automated Multi-Label Classification based on ML-Plan}},
  note = {arXiv preprint},
  publisher = {arXiv},
  year = {2018},
}
@misc{5936,
author = {Scheibl, Manuel},
internal-note = {NOTE(review): looks like a student thesis at Universität Paderborn; if so, prefer @mastersthesis (or the appropriate thesis type) with a school field instead of @misc with publisher -- verify},
publisher = {Universität Paderborn},
title = {{Learning about learning curves from dataset properties}},
year = {2018},
}
@unpublished{19524,
  author = {Pfannschmidt, Karlson and Gupta, Pritha and Hüllermeier, Eyke},
  title = {{Deep Architectures for Learning Context-dependent Ranking Functions}},
  eprint = {1803.05796},
  eprinttype = {arXiv},
  note = {arXiv:1803.05796},
  year = {2018},
  abstract = {Object ranking is an important problem in the realm of preference learning.
On the basis of training data in the form of a set of rankings of objects,
which are typically represented as feature vectors, the goal is to learn a
ranking function that predicts a linear order of any new set of objects.
Current approaches commonly focus on ranking by scoring, i.e., on learning an
underlying latent utility function that seeks to capture the inherent utility
of each object. These approaches, however, are not able to take possible
effects of context-dependence into account, where context-dependence means that
the utility or usefulness of an object may also depend on what other objects
are available as alternatives. In this paper, we formalize the problem of
context-dependent ranking and present two general approaches based on two
natural representations of context-dependent ranking functions. Both approaches
are instantiated by means of appropriate neural network architectures, which
are evaluated on suitable benchmark task.},
}
@article{24150,
  author = {Ramaswamy, Arunselvan and Bhatnagar, Shalabh},
  title = {{Stability of stochastic approximations with “controlled Markov” noise and temporal difference learning}},
  journal = {IEEE Transactions on Automatic Control},
  publisher = {IEEE},
  volume = {64},
  number = {6},
  pages = {2614--2620},
  internal-note = {NOTE(review): IEEE TAC volume 64, issue 6 corresponds to 2019; year = 2018 may be wrong -- verify},
  year = {2018},
}
@inproceedings{10148,
  author = {El Mesaoudi-Paul, Adil and Hüllermeier, Eyke and Busa-Fekete, Robert},
  title = {{Ranking Distributions based on Noisy Sorting}},
  booktitle = {Proc. 35th Int. Conference on Machine Learning (ICML)},
  pages = {3469--3477},
  year = {2018},
}
@inproceedings{10181,
  author = {Nguyen, Vu-Linh and Destercke, Sébastien and Masson, M.-H. and Hüllermeier, Eyke},
  title = {{Reliable Multi-class Classification based on Pairwise Epistemic and Aleatoric Uncertainty}},
  booktitle = {Proc. 27th Int. Joint Conference on Artificial Intelligence (IJCAI)},
  pages = {5089--5095},
  year = {2018},
}
@article{16038,
author = {Schäfer, D. and Hüllermeier, Eyke},
internal-note = {NOTE(review): duplicate of entry 10276 (same article, Machine Learning 107(5), 903--941); entry 10276 carries the fuller author name -- consider removing this key},
journal = {Machine Learning},
number = {5},
pages = {903--941},
title = {{Dyad ranking using Plackett-Luce models based on joint feature representations}},
volume = {107},
year = {2018},
}
@inproceedings{2109,
  author = {Wever, Marcel Dominik and Mohr, Felix and Hüllermeier, Eyke},
  title = {{Ensembles of Evolved Nested Dichotomies for Classification}},
  booktitle = {Proceedings of the Genetic and Evolutionary Computation Conference, GECCO 2018, Kyoto, Japan, July 15-19, 2018},
  location = {Kyoto, Japan},
  publisher = {ACM},
  keyword = {Classification, Hierarchical Decomposition, Indirect Encoding},
  doi = {10.1145/3205455.3205562},
  year = {2018},
  abstract = {In multinomial classification, reduction techniques are commonly used to decompose the original learning problem into several simpler problems. For example, by recursively bisecting the original set of classes, so-called nested dichotomies define a set of binary classification problems that are organized in the structure of a binary tree. In contrast to the existing one-shot heuristics for constructing nested dichotomies and motivated by recent work on algorithm configuration, we propose a genetic algorithm for optimizing the structure of such dichotomies. A key component of this approach is the proposed genetic representation that facilitates the application of standard genetic operators, while still supporting the exchange of partial solutions under recombination. We evaluate the approach in an extensive experimental study, showing that it yields classifiers with superior generalization performance.},
}
@inproceedings{2471,
  author = {Mohr, Felix and Wever, Marcel Dominik and Hüllermeier, Eyke},
  title = {{On-The-Fly Service Construction with Prototypes}},
  booktitle = {2018 IEEE International Conference on Services Computing (SCC)},
  location = {San Francisco, CA, USA},
  publisher = {IEEE Computer Society},
  doi = {10.1109/SCC.2018.00036},
  year = {2018},
}
@inbook{6423,
author = {Schäfer, Dirk and Hüllermeier, Eyke},
booktitle = {Discovery Science},
internal-note = {NOTE(review): apparent duplicate of entry 10184 (@inproceedings, Proc. 21st Int. Conference on Discovery Science (DS), same title and pages 161--175) -- consider keeping only one key},
isbn = {9783030017705},
issn = {0302-9743},
pages = {161--175},
publisher = {Springer International Publishing},
title = {{Preference-Based Reinforcement Learning Using Dyad Ranking}},
doi = {10.1007/978-3-030-01771-2_11},
year = {2018},
}
@unpublished{17714,
  author = {Mohr, Felix and Wever, Marcel Dominik and Hüllermeier, Eyke},
  title = {{Automated machine learning service composition}},
  note = {Unpublished manuscript},
  year = {2018},
}
@article{24151,
  author = {Demirel, Burak and Ramaswamy, Arunselvan and Quevedo, Daniel E. and Karl, Holger},
  title = {{DeepCAS: A deep reinforcement learning algorithm for control-aware scheduling}},
  journal = {IEEE Control Systems Letters},
  publisher = {IEEE},
  volume = {2},
  number = {4},
  pages = {737--742},
  year = {2018},
}
@article{10276,
author = {Schäfer, Dirk and Hüllermeier, Eyke},
internal-note = {NOTE(review): duplicate of entry 16038 (same article, Machine Learning 107(5), 903--941) -- keep only one key},
journal = {Machine Learning},
number = {5},
pages = {903--941},
title = {{Dyad Ranking Using Plackett-Luce Models based on joint feature representations}},
volume = {107},
year = {2018},
}
@inproceedings{10149,
  author = {Hesse, M. and Timmermann, J. and Hüllermeier, Eyke and Trächtler, Ansgar},
  title = {{A Reinforcement Learning Strategy for the Swing-Up of the Double Pendulum on a Cart}},
  booktitle = {Proc. 4th Int. Conference on System-Integrated Intelligence: Intelligent, Flexible and Connected Systems in Products and Production, Procedia Manufacturing 24},
  pages = {15--20},
  year = {2018},
}
@inbook{10783,
  author = {Couso, Ines and Hüllermeier, Eyke},
  title = {{Statistical Inference for Incomplete Ranking Data: A Comparison of two likelihood-based estimators}},
  booktitle = {Frontiers in Computational Intelligence},
  editor = {Mostaghim, Sanaz and Nürnberger, Andreas and Borgelt, Christian},
  publisher = {Springer},
  pages = {31--46},
  year = {2018},
}
@inproceedings{2857,
  author = {Mohr, Felix and Lettmann, Theodor and Hüllermeier, Eyke and Wever, Marcel Dominik},
  title = {{Programmatic Task Network Planning}},
  booktitle = {Proceedings of the 1st ICAPS Workshop on Hierarchical Planning},
  location = {Delft, Netherlands},
  publisher = {AAAI},
  pages = {31--39},
  year = {2018},
}
@inproceedings{3552,
  author = {Mohr, Felix and Wever, Marcel Dominik and Hüllermeier, Eyke},
  title = {{Reduction Stumps for Multi-Class Classification}},
  booktitle = {Proceedings of the Symposium on Intelligent Data Analysis},
  location = {'s-Hertogenbosch, the Netherlands},
  doi = {10.1007/978-3-030-01768-2_19},
  year = {2018},
}
@inproceedings{10145,
  author = {Ahmadi Fahandar, Mohsen and Hüllermeier, Eyke},
  title = {{Learning to Rank Based on Analogical Reasoning}},
  booktitle = {Proc. 32nd AAAI Conference on Artificial Intelligence (AAAI)},
  pages = {2951--2958},
  year = {2018},
}
@inbook{10152,
  author = {Loza Mencía, Eneldo and Fürnkranz, Johannes and Hüllermeier, Eyke and Rapp, Michael},
  title = {{Learning interpretable rules for multi-label classification}},
  booktitle = {Explainable and Interpretable Models in Computer Vision and Machine Learning},
  editor = {Escalante, Hugo Jair and Escalera, S. and Guyon, I. and Baro, X. and Güçlütürk, Y. and Güçlü, U. and van Gerven, M.A.J.},
  publisher = {Springer},
  pages = {81--113},
  year = {2018},
}
@inproceedings{1379,
  author = {Seemann, Nina and Geierhos, Michaela and Merten, Marie-Luis and Tophinke, Doris and Wever, Marcel Dominik and Hüllermeier, Eyke},
  title = {{Supporting the Cognitive Process in Annotation Tasks}},
  booktitle = {Postersession Computerlinguistik der 40. Jahrestagung der Deutschen Gesellschaft für Sprachwissenschaft},
  editor = {Eckart, Kerstin and Schlechtweg, Dominik},
  location = {Stuttgart, Germany},
  year = {2018},
}