@article{10264,
  author       = {Leinweber, M. and Fober, T. and Strickert, M. and Baumgärtner, L. and Klebe, G. and Freisleben, B. and Hüllermeier, Eyke},
  journal      = {IEEE Transactions on Knowledge and Data Engineering},
  number       = {6},
  pages        = {1423--1434},
  title        = {{CavSimBase}: A database for large scale comparison of protein binding sites},
  volume       = {28},
  year         = {2016},
}

@article{10266,
  author       = {Riemenschneider, M. and Senge, Robin and Neumann, U. and Hüllermeier, Eyke and Heider, D.},
  journal      = {BioData Mining},
  number       = {10},
  title        = {Exploiting {HIV-1} protease and reverse transcriptase cross-resistance information for improved drug resistance prediction by means of multi-label classification},
  volume       = {9},
  year         = {2016},
}

@article{1373,
  author       = {Herlich, Matthias and Bredenbals, Nico and Karl, Holger},
  issn         = {2210-5379},
  journal      = {Sustainable Computing: Informatics and Systems},
  pages        = {48--55},
  publisher    = {Elsevier BV},
  title        = {Delayed (de-)activation in servers with a sleep mode},
  doi          = {10.1016/j.suscom.2016.04.002},
  volume       = {10},
  year         = {2016},
}

@inproceedings{137,
  abstract     = {Wikidata is the new, large-scale knowledge base of the Wikimedia Foundation. Its knowledge is increasingly used within Wikipedia itself and various other kinds of information systems, imposing high demands on its integrity. Wikidata can be edited by anyone and, unfortunately, it frequently gets vandalized, exposing all information systems using it to the risk of spreading vandalized and falsified information. In this paper, we present a new machine learning-based approach to detect vandalism in Wikidata. We propose a set of 47 features that exploit both content and context information, and we report on 4 classifiers of increasing effectiveness tailored to this learning task. Our approach is evaluated on the recently published Wikidata Vandalism Corpus WDVC-2015 and it achieves an area under curve value of the receiver operating characteristic, ROC-AUC, of 0.991. It significantly outperforms the state of the art represented by the rule-based Wikidata Abuse Filter (0.865 ROC-AUC) and a prototypical vandalism detector recently introduced by Wikimedia within the Objective Revision Evaluation Service (0.859 ROC-AUC).},
  author       = {Heindorf, Stefan and Potthast, Matthias and Stein, Benno and Engels, Gregor},
  booktitle    = {Proceedings of the 25th International Conference on Information and Knowledge Management (CIKM 2016)},
  pages        = {327--336},
  title        = {Vandalism Detection in {Wikidata}},
  doi          = {10.1145/2983323.2983740},
  year         = {2016},
}

@incollection{2978,
  author       = {Blömer, Johannes and Bujna, Kathrin},
  booktitle    = {Advances in Knowledge Discovery and Data Mining},
  isbn         = {9783319317496},
  issn         = {0302-9743},
  pages        = {296--308},
  publisher    = {Springer International Publishing},
  title        = {Adaptive Seeding for {Gaussian} Mixture Models},
  doi          = {10.1007/978-3-319-31750-2_24},
  year         = {2016},
}

@article{48306,
  abstract     = {The goal of argumentation mining, an evolving research field in computational linguistics, is to design methods capable of analyzing people's argumentation. In this article, we go beyond the state of the art in several ways. (i) We deal with actual Web data and take up the challenges given by the variety of registers, multiple domains, and unrestricted noisy user-generated Web discourse. (ii) We bridge the gap between normative argumentation theories and argumentation phenomena encountered in actual data by adapting an argumentation model tested in an extensive annotation study. (iii) We create a new gold standard corpus (90k tokens in 340 documents) and experiment with several machine learning methods to identify argument components. We offer the data, source codes, and annotation guidelines to the community under free licenses. Our findings show that argumentation mining in user-generated Web discourse is a feasible but challenging task.},
  author       = {Habernal, Ivan and Gurevych, Iryna},
  issn         = {0891-2017},
  journal      = {Computational Linguistics},
  keywords     = {Artificial Intelligence, Computer Science Applications, Linguistics and Language, Language and Linguistics},
  number       = {1},
  pages        = {125--179},
  publisher    = {MIT Press},
  title        = {Argumentation Mining in User-Generated {Web} Discourse},
  doi          = {10.1162/coli_a_00276},
  volume       = {43},
  year         = {2016},
}

@inproceedings{48308,
  author       = {Habernal, Ivan and Sukhareva, Maria and Raiber, Fiana and Shtok, Anna and Kurland, Oren and Ronen, Hadar and Bar-Ilan, Judit and Gurevych, Iryna},
  booktitle    = {Proceedings of the 39th International ACM SIGIR conference on Research and Development in Information Retrieval},
  publisher    = {ACM},
  title        = {New Collection Announcement},
  doi          = {10.1145/2911451.2914682},
  year         = {2016},
}

@inproceedings{48307,
  author       = {Habernal, Ivan and Gurevych, Iryna},
  booktitle    = {Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)},
  publisher    = {Association for Computational Linguistics},
  title        = {Which argument is more convincing? Analyzing and predicting convincingness of {Web} arguments using bidirectional {LSTM}},
  doi          = {10.18653/v1/p16-1150},
  year         = {2016},
}

@inproceedings{46364,
  abstract     = {Automated algorithm configuration procedures play an increasingly important role in the development and application of algorithms for a wide range of computationally challenging problems. Until very recently, these configuration procedures were limited to optimising a single performance objective, such as the running time or solution quality achieved by the algorithm being configured. However, in many applications there is more than one performance objective of interest. This gives rise to the multi-objective automatic algorithm configuration problem, which involves finding a Pareto set of configurations of a given target algorithm that characterises trade-offs between multiple performance objectives. In this work, we introduce MO-ParamILS, a multi-objective extension of the state-of-the-art single-objective algorithm configuration framework ParamILS, and demonstrate that it produces good results on several challenging bi-objective algorithm configuration scenarios compared to a base-line obtained from using a state-of-the-art single-objective algorithm configurator.},
  author       = {Blot, A. and Hoos, H. and Jourdan, L. and Marmion, M. and Trautmann, Heike},
  booktitle    = {LION 2016: Learning and Intelligent Optimization},
  editor       = {Vanschoren, Joaquin and others},
  pages        = {32--47},
  publisher    = {Springer International Publishing},
  title        = {{MO-ParamILS}: A Multi-objective Automatic Algorithm Configuration Framework},
  doi          = {10.1007/978-3-319-50349-3_3},
  volume       = {10079},
  year         = {2016},
}

@incollection{46363,
  abstract     = {The averaged Hausdorff distance has been proposed as an indicator for assessing the quality of finitely sized approximations of the Pareto front of a multiobjective problem. Since many set-based, iterative optimization algorithms store their currently best approximation in an internal archive these approximations are also termed archives. In case of two objectives and continuous variables it is known that the best approximations in terms of averaged Hausdorff distance are subsets of the Pareto front if it is concave. If it is linear or circularly concave the points of the best approximation are equally spaced.

Here, it is proven that the optimal averaged Hausdorff approximation and the Pareto front have an empty intersection if the Pareto front is circularly convex. But the points of the best approximation are equally spaced and they rapidly approach the Pareto front for increasing size of the approximation.},
  author       = {Rudolph, G. and Schütze, O. and Trautmann, Heike},
  booktitle    = {Applications of Evolutionary Computation: 19th European Conference, EvoApplications 2016, Porto, Portugal, March 30 -- April 1, 2016, Proceedings, Part II},
  editor       = {Squillero, G. and Burelli, P.},
  isbn         = {978-3-319-31153-1},
  pages        = {42--55},
  publisher    = {Springer International Publishing},
  title        = {On the Closest Averaged {Hausdorff} Archive for a Circularly Convex {Pareto} Front},
  doi          = {10.1007/978-3-319-31153-1_4},
  year         = {2016},
}

@inproceedings{46369,
  abstract     = {This paper formally defines multimodality in multiobjective optimization (MO). We introduce a test-bed in which multimodal MO problems with known properties can be constructed as well as numerical characteristics of the resulting landscape. Gradient- and local search based strategies are compared on exemplary problems together with specific performance indicators in the multimodal MO setting. By this means the foundation for Exploratory Landscape Analysis in MO is provided.},
  author       = {Kerschke, Pascal and Wang, Hao and Preuss, Mike and Grimme, Christian and Deutz, André and Trautmann, Heike and Emmerich, Michael},
  booktitle    = {Proceedings of the 14th International Conference on Parallel Problem Solving from Nature (PPSN XIV)},
  pages        = {962--972},
  publisher    = {Springer},
  title        = {Towards Analyzing Multimodality of Multiobjective Landscapes},
  doi          = {10.1007/978-3-319-45823-6_90},
  year         = {2016},
}

@inproceedings{46367,
  abstract     = {When selecting the best suited algorithm for an unknown optimization problem, it is useful to possess some a priori knowledge of the problem at hand. In the context of single-objective, continuous optimization problems such knowledge can be retrieved by means of Exploratory Landscape Analysis (ELA), which automatically identifies properties of a landscape, e.g., the so-called funnel structures, based on an initial sample. In this paper, we extract the relevant features (for detecting funnels) out of a large set of landscape features when only given a small initial sample consisting of 50 x D observations, where D is the number of decision space dimensions. This is already in the range of the start population sizes of many evolutionary algorithms. The new Multiple Peaks Model Generator (MPM2) is used for training the classifier, and the approach is then very successfully validated on the Black-Box Optimization Benchmark (BBOB) and a subset of the CEC 2013 niching competition problems.},
  author       = {Kerschke, Pascal and Preuss, Mike and Wessing, Simon and Trautmann, Heike},
  booktitle    = {Proceedings of the 18th Annual Conference on Genetic and Evolutionary Computation},
  isbn         = {978-1-4503-4206-3},
  pages        = {229--236},
  title        = {Low-Budget Exploratory Landscape Analysis on Multiple Peaks Models},
  doi          = {10.1145/2908812.2908845},
  year         = {2016},
}

@article{46371,
  abstract     = {One main task in evolutionary multiobjective optimization (EMO) is to obtain a suitable finite size approximation of the Pareto front which is the image of the solution set, termed the Pareto set, of a given multiobjective optimization problem. In the technical literature, the characteristic of the desired approximation is commonly expressed by closeness to the Pareto front and a sufficient spread of the solutions obtained. In this paper, we first make an effort to show by theoretical and empirical findings that the recently proposed Averaged Hausdorff (or {$\Delta_p$}-) indicator indeed aims at fulfilling both performance criteria for bi-objective optimization problems. In the second part of this paper, standard EMO algorithms combined with a specialized archiver and a postprocessing step based on the {$\Delta_p$} indicator are introduced which sufficiently approximate the {$\Delta_p$}-optimal archives and generate solutions evenly spread along the Pareto front.},
  author       = {Rudolph, G. and Schütze, O. and Grimme, C. and Domínguez-Medina, C. and Trautmann, Heike},
  journal      = {Computational Optimization and Applications},
  number       = {2},
  pages        = {589--618},
  title        = {Optimal averaged {Hausdorff} archives for bi-objective problems: theoretical and numerical results},
  doi          = {10.1007/s10589-015-9815-8},
  volume       = {64},
  year         = {2016},
}

@article{46372,
  abstract     = {We present a new hybrid evolutionary algorithm for the effective hypervolume approximation of the Pareto front of a given differentiable multi-objective optimization problem. Starting point for the local search (LS) mechanism is a new division of the decision space as we will argue that in each of these regions a different LS strategy seems to be most promising. For the LS in two out of the three regions we will utilize and adapt the Directed Search method which is capable of steering the search into any direction given in objective space and which is thus well suited for the problem at hand. We further on integrate the resulting LS mechanism into SMS-EMOA, a state-of-the-art evolutionary algorithm for hypervolume approximations. Finally, we will present some numerical results on several benchmark problems with two and three objectives indicating the strength and competitiveness of the novel hybrid.},
  author       = {Schütze, O. and Sosa Hernández, V. A. and Trautmann, Heike and Rudolph, G.},
  journal      = {Journal of Heuristics},
  number       = {3},
  pages        = {273--300},
  title        = {The Hypervolume based Directed Search Method for Multi-Objective Optimization Problems},
  doi          = {10.1007/s10732-016-9310-0},
  volume       = {22},
  year         = {2016},
}

@inproceedings{46368,
  abstract     = {Exploratory Landscape Analysis (ELA) aims at understanding characteristics of single-objective continuous (black-box) optimization problems in an automated way. Moreover, the approach provides the basis for constructing algorithm selection models for unseen problem instances. Recently, it has gained increasing attention and numerical features have been designed by various research groups. This paper introduces the R-Package FLACCO which makes all relevant features available in a unified framework together with efficient helper functions. Moreover, a case study which gives perspectives to ELA for multi-objective optimization problems is presented.},
  author       = {Kerschke, Pascal and Trautmann, Heike},
  booktitle    = {Proceedings of the IEEE Congress on Evolutionary Computation (CEC)},
  title        = {The {R-Package} {FLACCO} for Exploratory Landscape Analysis with Applications to Multi-Objective Optimization Problems},
  doi          = {10.1109/CEC.2016.7748359},
  year         = {2016},
}

@article{46370,
  abstract     = {This report documents the talks and discussions at the Dagstuhl Seminar 15211 "Theory of Evolutionary Algorithms". This seminar, now in its 8th edition, is the main meeting point of the highly active theory of randomized search heuristics subcommunities in Australia, Asia, North America, and Europe. Topics intensively discussed include rigorous runtime analysis and computational complexity theory for randomised search heuristics, information geometry of randomised search, and synergies between the theory of evolutionary algorithms and theories of natural evolution.},
  author       = {Neumann, F. and Trautmann, Heike},
  journal      = {Dagstuhl Reports},
  number       = {5},
  pages        = {78--79},
  title        = {Working Group Report: Bridging the Gap Between Experiments and Theory Using Feature-Based Run-Time Analysis; Theory of Evolutionary Algorithms ({Dagstuhl} Seminar 15211)},
  doi          = {10.4230/DagRep.5.5.57},
  volume       = {5},
  year         = {2016},
}

@inproceedings{48309,
  author       = {Habernal, Ivan and Gurevych, Iryna},
  booktitle    = {Proceedings of the 2016 Conference on Empirical Methods in Natural Language Processing},
  publisher    = {Association for Computational Linguistics},
  title        = {What makes a convincing argument? Empirical analysis and detecting attributes of convincingness in {Web} argumentation},
  doi          = {10.18653/v1/d16-1129},
  year         = {2016},
}

@inproceedings{48873,
  abstract     = {Despite the intrinsic hardness of the Traveling Salesperson Problem (TSP) heuristic solvers, e.g., LKH+restart and EAX+restart, are remarkably successful in generating satisfactory or even optimal solutions. However, the reasons for their success are not yet fully understood. Recent approaches take an analytical viewpoint and try to identify instance features, which make an instance hard or easy to solve. We contribute to this area by generating instance sets for couples of TSP algorithms A and B by maximizing/minimizing their performance difference in order to generate instances which are easier to solve for one solver and much harder to solve for the other. This instance set offers the potential to identify key features which allow to distinguish between the problem hardness classes of both algorithms.},
  author       = {Bossek, Jakob and Trautmann, Heike},
  booktitle    = {Learning and Intelligent Optimization},
  editor       = {Festa, Paola and Sellmann, Meinolf and Vanschoren, Joaquin},
  isbn         = {978-3-319-50349-3},
  keywords     = {Algorithm selection, Feature selection, Instance hardness, TSP},
  pages        = {48--59},
  publisher    = {Springer International Publishing},
  title        = {Evolving Instances for Maximizing Performance Differences of State-of-the-Art Inexact {TSP} Solvers},
  doi          = {10.1007/978-3-319-50349-3_4},
  year         = {2016},
}

@article{52803,
  author       = {Borgwardt, Stefan and Mailis, Theofilos and Peñaloza, Rafael and Turhan, Anni-Yasmin},
  issn         = {1861-2032},
  journal      = {Journal on Data Semantics},
  keywords     = {Artificial Intelligence, Computer Networks and Communications, Information Systems},
  number       = {2},
  pages        = {55--75},
  publisher    = {Springer Science and Business Media LLC},
  title        = {Answering Fuzzy Conjunctive Queries Over Finitely Valued Fuzzy Ontologies},
  doi          = {10.1007/s13740-015-0055-y},
  volume       = {5},
  year         = {2016},
}

@inproceedings{47240,
  author       = {Acar, Yasemin and Backes, Michael and Bugiel, Sven and Fahl, Sascha and McDaniel, Patrick and Smith, Matthew},
  booktitle    = {2016 IEEE Symposium on Security and Privacy (SP)},
  publisher    = {IEEE},
  title        = {{SoK}: Lessons Learned from {Android} Security Research for Appified Software Platforms},
  doi          = {10.1109/sp.2016.33},
  year         = {2016},
}

