@inproceedings{46306,
  abstract     = {Hyperparameter optimization (HPO) is a key component of machine learning models for achieving peak predictive performance. While numerous methods and algorithms for HPO have been proposed over the last years, little progress has been made in illuminating and examining the actual structure of these black-box optimization problems. Exploratory landscape analysis (ELA) subsumes a set of techniques that can be used to gain knowledge about properties of unknown optimization problems. In this paper, we evaluate the performance of five different black-box optimizers on 30 HPO problems, which consist of two-, three- and five-dimensional continuous search spaces of the XGBoost learner trained on 10 different data sets. This is contrasted with the performance of the same optimizers evaluated on 360 problem instances from the black-box optimization benchmark (BBOB). We then compute ELA features on the HPO and BBOB problems and examine similarities and differences. A cluster analysis of the HPO and BBOB problems in ELA feature space allows us to identify how the HPO problems compare to the BBOB problems on a structural meta-level. We identify a subset of BBOB problems that are close to the HPO problems in ELA feature space and show that optimizer performance is comparably similar on these two sets of benchmark problems. We highlight open challenges of ELA for HPO and discuss potential directions of future research and applications.},
  author       = {Schneider, Lennart and Schäpermeier, Lennart and Prager, Raphael Patrick and Bischl, Bernd and Trautmann, Heike and Kerschke, Pascal},
  booktitle    = {Parallel Problem Solving from Nature -- {PPSN XVII}},
  editor       = {Rudolph, Günter and Kononova, Anna V. and Aguirre, Hernán and Kerschke, Pascal and Ochoa, Gabriela and Tušar, Tea},
  isbn         = {978-3-031-14714-2},
  pages        = {575--589},
  publisher    = {Springer International Publishing},
  title        = {{HPO} x {ELA}: Investigating Hyperparameter Optimization Landscapes by Means of Exploratory Landscape Analysis},
  doi          = {10.1007/978-3-031-14714-2_40},
  year         = {2022},
}

@inproceedings{48882,
  abstract     = {In multimodal multi-objective optimization (MMMOO), the focus is not solely on convergence in objective space, but rather also on explicitly ensuring diversity in decision space. We illustrate why commonly used diversity measures are not entirely appropriate for this task and propose a sophisticated basin-based evaluation (BBE) method. Also, BBE variants are developed, capturing the anytime behavior of algorithms. The set of BBE measures is tested by means of an algorithm configuration study. We show that these new measures also transfer properties of the well-established hypervolume (HV) indicator to the domain of MMMOO, thus also accounting for objective space convergence. Moreover, we advance MMMOO research by providing insights into the multimodal performance of the considered algorithms. Specifically, algorithms exploiting local structures are shown to outperform classical evolutionary multi-objective optimizers regarding the BBE variants and respective trade-off with HV.},
  author       = {Heins, Jonathan and Rook, Jeroen and Schäpermeier, Lennart and Kerschke, Pascal and Bossek, Jakob and Trautmann, Heike},
  booktitle    = {Parallel Problem Solving from Nature -- {PPSN XVII}},
  editor       = {Rudolph, Günter and Kononova, Anna V. and Aguirre, Hernán and Kerschke, Pascal and Ochoa, Gabriela and Tušar, Tea},
  isbn         = {978-3-031-14714-2},
  keywords     = {Anytime behavior, Benchmarking, Continuous optimization, Multi-objective optimization, Multimodality, Performance metric},
  pages        = {192--206},
  publisher    = {Springer International Publishing},
  title        = {{BBE}: Basin-Based Evaluation of Multimodal Multi-objective Optimization Problems},
  doi          = {10.1007/978-3-031-14714-2_14},
  year         = {2022},
}

@inproceedings{48894,
  abstract     = {Recently different evolutionary computation approaches have been developed that generate sets of high quality diverse solutions for a given optimisation problem. Many studies have considered diversity 1) as a mean to explore niches in behavioural space (quality diversity) or 2) to increase the structural differences of solutions (evolutionary diversity optimisation). In this study, we introduce a co-evolutionary algorithm to simultaneously explore the two spaces for the multi-component traveling thief problem. The results show the capability of the co-evolutionary algorithm to achieve significantly higher diversity compared to the baseline evolutionary diversity algorithms from the literature.},
  author       = {Nikfarjam, Adel and Neumann, Aneta and Bossek, Jakob and Neumann, Frank},
  booktitle    = {Parallel Problem Solving from Nature -- {PPSN XVII}},
  editor       = {Rudolph, Günter and Kononova, Anna V. and Aguirre, Hernán and Kerschke, Pascal and Ochoa, Gabriela and Tušar, Tea},
  isbn         = {978-3-031-14714-2},
  keywords     = {Co-evolutionary algorithms, Evolutionary diversity optimisation, Quality diversity, Traveling thief problem},
  pages        = {237--249},
  publisher    = {Springer International Publishing},
  title        = {Co-Evolutionary Diversity Optimisation for the Traveling Thief Problem},
  doi          = {10.1007/978-3-031-14714-2_17},
  year         = {2022},
}

@inproceedings{46304,
  abstract     = {In recent years, feature-based automated algorithm selection using exploratory landscape analysis has demonstrated its great potential in single-objective continuous black-box optimization. However, feature computation is problem-specific and can be costly in terms of computational resources. This paper investigates feature-free approaches that rely on state-of-the-art deep learning techniques operating on either images or point clouds. We show that point-cloud-based strategies, in particular, are highly competitive and also substantially reduce the size of the required solver portfolio. Moreover, we highlight the effect and importance of cost-sensitive learning in automated algorithm selection models.},
  author       = {Prager, Raphael Patrick and Seiler, Moritz and Trautmann, Heike and Kerschke, Pascal},
  booktitle    = {Parallel Problem Solving from Nature -- {PPSN XVII}},
  editor       = {Rudolph, Günter and Kononova, Anna V. and Aguirre, Hernán and Kerschke, Pascal and Ochoa, Gabriela and Tušar, Tea},
  isbn         = {978-3-031-14714-2},
  pages        = {3--17},
  publisher    = {Springer International Publishing},
  title        = {Automated Algorithm Selection in Single-Objective Continuous Optimization: A Comparative Study of Deep Learning and Landscape Analysis Methods},
  doi          = {10.1007/978-3-031-14714-2_1},
  year         = {2022},
}

@inproceedings{46302,
  author       = {Heins, Jonathan and Rook, Jeroen and Schäpermeier, Lennart and Kerschke, Pascal and Bossek, Jakob and Trautmann, Heike},
  booktitle    = {Parallel Problem Solving from Nature -- {PPSN XVII}},
  editor       = {Rudolph, Günter and Kononova, Anna V. and Aguirre, Hernán and Kerschke, Pascal and Ochoa, Gabriela and Tušar, Tea},
  isbn         = {978-3-031-14714-2},
  pages        = {192--206},
  publisher    = {Springer International Publishing},
  title        = {{BBE}: Basin-Based Evaluation of Multimodal Multi-objective Optimization Problems},
  doi          = {10.1007/978-3-031-14714-2_14},
  year         = {2022},
  internal-note = {NOTE(review): duplicate of entry 48882 (same paper, pages, venue, year) under a different key -- keep only one and update citations},
}

