@inproceedings{46396,
  author       = {Bischl, Bernd and Mersmann, Olaf and Trautmann, Heike and Preu{\ss}, Mike},
  title        = {Algorithm Selection Based on Exploratory Landscape Analysis and Cost-Sensitive Learning},
  booktitle    = {Proceedings of the 14th Annual Conference on Genetic and Evolutionary Computation},
  publisher    = {Association for Computing Machinery},
  pages        = {313--320},
  year         = {2012},
  isbn         = {9781450311779},
  doi          = {10.1145/2330163.2330209},
  keywords     = {machine learning, exploratory landscape analysis, fitness landscape, benchmarking, evolutionary optimization, BBOB test set, algorithm selection},
  abstract     = {The steady supply of new optimization methods makes the algorithm selection problem (ASP) an increasingly pressing and challenging task, specially for real-world black-box optimization problems. The introduced approach considers the ASP as a cost-sensitive classification task which is based on Exploratory Landscape Analysis. Low-level features gathered by systematic sampling of the function on the feasible set are used to predict a well-performing algorithm out of a given portfolio. Example-specific label costs are defined by the expected runtime of each candidate algorithm. We use one-sided support vector regression to solve this learning problem. The approach is illustrated by means of the optimization problems and algorithms of the BBOB'09/10 workshop.},
}

@inproceedings{46401,
  author       = {Mersmann, Olaf and Bischl, Bernd and Trautmann, Heike and Preuss, Mike and Weihs, Claus and Rudolph, G{\"u}nter},
  title        = {Exploratory Landscape Analysis},
  booktitle    = {Proceedings of the 13th Annual Conference on Genetic and Evolutionary Computation},
  publisher    = {Association for Computing Machinery},
  pages        = {829--836},
  year         = {2011},
  isbn         = {9781450305570},
  doi          = {10.1145/2001576.2001690},
  keywords     = {exploratory landscape analysis, evolutionary optimization, fitness landscape, benchmarking, BBOB test set},
  abstract     = {Exploratory Landscape Analysis subsumes a number of techniques employed to obtain knowledge about the properties of an unknown optimization problem, especially insofar as these properties are important for the performance of optimization algorithms. Where in a first attempt, one could rely on high-level features designed by experts, we approach the problem from a different angle here, namely by using relatively cheap low-level computer generated features. Interestingly, very few features are needed to separate the BBOB problem groups and also for relating a problem to high-level, expert designed features, paving the way for automatic algorithm selection.},
}

@inproceedings{46405,
  author       = {Mersmann, Olaf and Preuss, Mike and Trautmann, Heike},
  title        = {Benchmarking Evolutionary Algorithms: Towards Exploratory Landscape Analysis},
  booktitle    = {Proceedings of the 11th International Conference on Parallel Problem Solving from Nature: Part I},
  publisher    = {Springer-Verlag},
  pages        = {73--82},
  year         = {2010},
  isbn         = {3642158439},
  keywords     = {benchmarking, multidimensional scaling, consensus ranking, evolutionary optimization, BBOB test set},
  abstract     = {We present methods to answer two basic questions that arise when benchmarking optimization algorithms. The first one is: which algorithm is the 'best' one? and the second one: which algorithm should I use for my real world problem? Both are connected and neither is easy to answer. We present methods which can be used to analyse the raw data of a benchmark experiment and derive some insight regarding the answers to these questions. We employ the presented methods to analyse the BBOB'09 benchmark results and present some initial findings.},
  internal-note = {NOTE(review): no DOI recorded, unlike sibling entries; this is a Springer LNCS chapter (PPSN XI) -- look up and add the chapter DOI},
}

