[{"language":[{"iso":"eng"}],"keyword":["Benchmarking","Instance Generator","Black-Box Continuous Optimization","Exploratory Landscape Analysis","Neural Networks"],"series_title":"FOGA ’23","user_id":"15504","department":[{"_id":"34"},{"_id":"819"}],"_id":"47522","status":"public","abstract":[{"lang":"eng","text":"Artificial benchmark functions are commonly used in optimization research because of their ability to rapidly evaluate potential solutions, making them a preferred substitute for real-world problems. However, these benchmark functions have faced criticism for their limited resemblance to real-world problems. In response, recent research has focused on automatically generating new benchmark functions for areas where established test suites are inadequate. These approaches have limitations, such as the difficulty of generating new benchmark functions that exhibit exploratory landscape analysis (ELA) features beyond those of existing benchmarks. The objective of this work is to develop a method for generating benchmark functions for single-objective continuous optimization with user-specified structural properties. Specifically, we aim to demonstrate a proof of concept for a method that uses an ELA feature vector to specify these properties in advance. To achieve this, we begin by generating a random sample of decision space variables and objective values. We then adjust the objective values using CMA-ES until the corresponding features of our new problem match the predefined ELA features within a specified threshold. By iteratively transforming the landscape in this way, we ensure that the resulting function exhibits the desired properties. To create the final function, we use the resulting point cloud as training data for a simple neural network that produces a function exhibiting the target ELA features. 
We demonstrate the effectiveness of this approach by replicating the existing functions of the well-known BBOB suite and creating new functions with ELA feature values that are not present in BBOB."}],"type":"conference","publication":"Proceedings of the 17th ACM/SIGEVO Conference on Foundations of Genetic Algorithms","doi":"10.1145/3594805.3607136","title":"Neural Networks as Black-Box Benchmark Functions Optimized for Exploratory Landscape Features","author":[{"full_name":"Prager, Raphael Patrick","last_name":"Prager","first_name":"Raphael Patrick"},{"first_name":"Konstantin","full_name":"Dietrich, Konstantin","last_name":"Dietrich"},{"last_name":"Schneider","full_name":"Schneider, Lennart","first_name":"Lennart"},{"first_name":"Lennart","full_name":"Schäpermeier, Lennart","last_name":"Schäpermeier"},{"last_name":"Bischl","full_name":"Bischl, Bernd","first_name":"Bernd"},{"first_name":"Pascal","full_name":"Kerschke, Pascal","last_name":"Kerschke"},{"id":"100740","full_name":"Trautmann, Heike","last_name":"Trautmann","orcid":"0000-0002-9788-8282","first_name":"Heike"},{"full_name":"Mersmann, Olaf","last_name":"Mersmann","first_name":"Olaf"}],"date_created":"2023-09-27T15:43:17Z","date_updated":"2023-10-16T12:33:02Z","publisher":"Association for Computing Machinery","citation":{"ama":"Prager RP, Dietrich K, Schneider L, et al. Neural Networks as Black-Box Benchmark Functions Optimized for Exploratory Landscape Features. In: <i>Proceedings of the 17th ACM/SIGEVO Conference on Foundations of Genetic Algorithms</i>. FOGA ’23. Association for Computing Machinery; 2023:129–139. doi:<a href=\"https://doi.org/10.1145/3594805.3607136\">10.1145/3594805.3607136</a>","chicago":"Prager, Raphael Patrick, Konstantin Dietrich, Lennart Schneider, Lennart Schäpermeier, Bernd Bischl, Pascal Kerschke, Heike Trautmann, and Olaf Mersmann. 
“Neural Networks as Black-Box Benchmark Functions Optimized for Exploratory Landscape Features.” In <i>Proceedings of the 17th ACM/SIGEVO Conference on Foundations of Genetic Algorithms</i>, 129–139. FOGA ’23. New York, NY, USA: Association for Computing Machinery, 2023. <a href=\"https://doi.org/10.1145/3594805.3607136\">https://doi.org/10.1145/3594805.3607136</a>.","ieee":"R. P. Prager <i>et al.</i>, “Neural Networks as Black-Box Benchmark Functions Optimized for Exploratory Landscape Features,” in <i>Proceedings of the 17th ACM/SIGEVO Conference on Foundations of Genetic Algorithms</i>, 2023, pp. 129–139, doi: <a href=\"https://doi.org/10.1145/3594805.3607136\">10.1145/3594805.3607136</a>.","mla":"Prager, Raphael Patrick, et al. “Neural Networks as Black-Box Benchmark Functions Optimized for Exploratory Landscape Features.” <i>Proceedings of the 17th ACM/SIGEVO Conference on Foundations of Genetic Algorithms</i>, Association for Computing Machinery, 2023, pp. 129–139, doi:<a href=\"https://doi.org/10.1145/3594805.3607136\">10.1145/3594805.3607136</a>.","short":"R.P. Prager, K. Dietrich, L. Schneider, L. Schäpermeier, B. Bischl, P. Kerschke, H. Trautmann, O. Mersmann, in: Proceedings of the 17th ACM/SIGEVO Conference on Foundations of Genetic Algorithms, Association for Computing Machinery, New York, NY, USA, 2023, pp. 
129–139.","bibtex":"@inproceedings{Prager_Dietrich_Schneider_Schäpermeier_Bischl_Kerschke_Trautmann_Mersmann_2023, place={New York, NY, USA}, series={FOGA ’23}, title={Neural Networks as Black-Box Benchmark Functions Optimized for Exploratory Landscape Features}, DOI={<a href=\"https://doi.org/10.1145/3594805.3607136\">10.1145/3594805.3607136</a>}, booktitle={Proceedings of the 17th ACM/SIGEVO Conference on Foundations of Genetic Algorithms}, publisher={Association for Computing Machinery}, author={Prager, Raphael Patrick and Dietrich, Konstantin and Schneider, Lennart and Schäpermeier, Lennart and Bischl, Bernd and Kerschke, Pascal and Trautmann, Heike and Mersmann, Olaf}, year={2023}, pages={129–139}, collection={FOGA ’23} }","apa":"Prager, R. P., Dietrich, K., Schneider, L., Schäpermeier, L., Bischl, B., Kerschke, P., Trautmann, H., &#38; Mersmann, O. (2023). Neural Networks as Black-Box Benchmark Functions Optimized for Exploratory Landscape Features. <i>Proceedings of the 17th ACM/SIGEVO Conference on Foundations of Genetic Algorithms</i>, 129–139. <a href=\"https://doi.org/10.1145/3594805.3607136\">https://doi.org/10.1145/3594805.3607136</a>"},"page":"129–139","year":"2023","place":"New York, NY, USA","publication_identifier":{"isbn":["9798400702020"]}},{"citation":{"apa":"Heins, J., Rook, J., Schäpermeier, L., Kerschke, P., Bossek, J., &#38; Trautmann, H. (2022). BBE: Basin-Based Evaluation of Multimodal Multi-objective Optimization Problems. In G. Rudolph, A. V. Kononova, H. Aguirre, P. Kerschke, G. Ochoa, &#38; T. Tusar (Eds.), <i>Parallel Problem Solving from Nature (PPSN XVII)</i> (pp. 192–206). Springer International Publishing. <a href=\"https://doi.org/10.1007/978-3-031-14714-2_14\">https://doi.org/10.1007/978-3-031-14714-2_14</a>","mla":"Heins, Jonathan, et al. 
“BBE: Basin-Based Evaluation of Multimodal Multi-Objective Optimization Problems.” <i>Parallel Problem Solving from Nature (PPSN XVII)</i>, edited by Günter Rudolph et al., Springer International Publishing, 2022, pp. 192–206, doi:<a href=\"https://doi.org/10.1007/978-3-031-14714-2_14\">10.1007/978-3-031-14714-2_14</a>.","short":"J. Heins, J. Rook, L. Schäpermeier, P. Kerschke, J. Bossek, H. Trautmann, in: G. Rudolph, A.V. Kononova, H. Aguirre, P. Kerschke, G. Ochoa, T. Tusar (Eds.), Parallel Problem Solving from Nature (PPSN XVII), Springer International Publishing, Cham, 2022, pp. 192–206.","bibtex":"@inproceedings{Heins_Rook_Schäpermeier_Kerschke_Bossek_Trautmann_2022, place={Cham}, series={Lecture Notes in Computer Science}, title={BBE: Basin-Based Evaluation of Multimodal Multi-objective Optimization Problems}, DOI={<a href=\"https://doi.org/10.1007/978-3-031-14714-2_14\">10.1007/978-3-031-14714-2_14</a>}, booktitle={Parallel Problem Solving from Nature (PPSN XVII)}, publisher={Springer International Publishing}, author={Heins, Jonathan and Rook, Jeroen and Schäpermeier, Lennart and Kerschke, Pascal and Bossek, Jakob and Trautmann, Heike}, editor={Rudolph, Günter and Kononova, Anna V. and Aguirre, Hernán and Kerschke, Pascal and Ochoa, Gabriela and Tusar, Tea}, year={2022}, pages={192–206}, collection={Lecture Notes in Computer Science} }","ama":"Heins J, Rook J, Schäpermeier L, Kerschke P, Bossek J, Trautmann H. BBE: Basin-Based Evaluation of Multimodal Multi-objective Optimization Problems. In: Rudolph G, Kononova AV, Aguirre H, Kerschke P, Ochoa G, Tusar T, eds. <i>Parallel Problem Solving from Nature (PPSN XVII)</i>. Lecture Notes in Computer Science. Springer International Publishing; 2022:192–206. doi:<a href=\"https://doi.org/10.1007/978-3-031-14714-2_14\">10.1007/978-3-031-14714-2_14</a>","chicago":"Heins, Jonathan, Jeroen Rook, Lennart Schäpermeier, Pascal Kerschke, Jakob Bossek, and Heike Trautmann. 
“BBE: Basin-Based Evaluation of Multimodal Multi-Objective Optimization Problems.” In <i>Parallel Problem Solving from Nature (PPSN XVII)</i>, edited by Günter Rudolph, Anna V. Kononova, Hernán Aguirre, Pascal Kerschke, Gabriela Ochoa, and Tea Tusar, 192–206. Lecture Notes in Computer Science. Cham: Springer International Publishing, 2022. <a href=\"https://doi.org/10.1007/978-3-031-14714-2_14\">https://doi.org/10.1007/978-3-031-14714-2_14</a>.","ieee":"J. Heins, J. Rook, L. Schäpermeier, P. Kerschke, J. Bossek, and H. Trautmann, “BBE: Basin-Based Evaluation of Multimodal Multi-objective Optimization Problems,” in <i>Parallel Problem Solving from Nature (PPSN XVII)</i>, 2022, pp. 192–206, doi: <a href=\"https://doi.org/10.1007/978-3-031-14714-2_14\">10.1007/978-3-031-14714-2_14</a>."},"page":"192–206","place":"Cham","publication_identifier":{"isbn":["978-3-031-14714-2"]},"doi":"10.1007/978-3-031-14714-2_14","author":[{"full_name":"Heins, Jonathan","last_name":"Heins","first_name":"Jonathan"},{"full_name":"Rook, Jeroen","last_name":"Rook","first_name":"Jeroen"},{"first_name":"Lennart","last_name":"Schäpermeier","full_name":"Schäpermeier, Lennart"},{"last_name":"Kerschke","full_name":"Kerschke, Pascal","first_name":"Pascal"},{"first_name":"Jakob","full_name":"Bossek, Jakob","id":"102979","orcid":"0000-0002-4121-4668","last_name":"Bossek"},{"first_name":"Heike","last_name":"Trautmann","full_name":"Trautmann, Heike"}],"date_updated":"2023-12-13T10:47:50Z","status":"public","editor":[{"last_name":"Rudolph","full_name":"Rudolph, Günter","first_name":"Günter"},{"full_name":"Kononova, Anna V.","last_name":"Kononova","first_name":"Anna V."},{"last_name":"Aguirre","full_name":"Aguirre, Hernán","first_name":"Hernán"},{"full_name":"Kerschke, Pascal","last_name":"Kerschke","first_name":"Pascal"},{"last_name":"Ochoa","full_name":"Ochoa, Gabriela","first_name":"Gabriela"},{"last_name":"Tusar","full_name":"Tusar, 
Tea","first_name":"Tea"}],"type":"conference","extern":"1","series_title":"Lecture Notes in Computer Science","user_id":"102979","department":[{"_id":"819"}],"_id":"48882","year":"2022","title":"BBE: Basin-Based Evaluation of Multimodal Multi-objective Optimization Problems","date_created":"2023-11-14T15:58:58Z","publisher":"Springer International Publishing","abstract":[{"lang":"eng","text":"In multimodal multi-objective optimization (MMMOO), the focus is not solely on convergence in objective space, but rather also on explicitly ensuring diversity in decision space. We illustrate why commonly used diversity measures are not entirely appropriate for this task and propose a sophisticated basin-based evaluation (BBE) method. Also, BBE variants are developed, capturing the anytime behavior of algorithms. The set of BBE measures is tested by means of an algorithm configuration study. We show that these new measures also transfer properties of the well-established hypervolume (HV) indicator to the domain of MMMOO, thus also accounting for objective space convergence. Moreover, we advance MMMOO research by providing insights into the multimodal performance of the considered algorithms. Specifically, algorithms exploiting local structures are shown to outperform classical evolutionary multi-objective optimizers regarding the BBE variants and respective trade-off with HV."}],"publication":"Parallel Problem Solving from Nature (PPSN XVII)","language":[{"iso":"eng"}],"keyword":["Anytime behavior","Benchmarking","Continuous optimization","Multi-objective optimization","Multimodality","Performance metric"]},{"abstract":[{"lang":"eng","text":"Multi-objective (MO) optimization, i.e., the simultaneous optimization of multiple conflicting objectives, is gaining more and more attention in various research areas, such as evolutionary computation, machine learning (e.g., (hyper-)parameter optimization), or logistics (e.g., vehicle routing). 
Many works in this domain mention the structural problem property of multimodality as a challenge from two classical perspectives: (1) finding all globally optimal solution sets, and (2) avoiding to get trapped in local optima. Interestingly, these streams seem to transfer many traditional concepts of single-objective (SO) optimization into claims, assumptions, or even terminology regarding the MO domain, but mostly neglect the understanding of the structural properties as well as the algorithmic search behavior on a problem’s landscape. However, some recent works counteract this trend, by investigating the fundamentals and characteristics of MO problems using new visualization techniques and gaining surprising insights. Using these visual insights, this work proposes a step towards a unified terminology to capture multimodality and locality in a broader way than it is usually done. This enables us to investigate current research activities in multimodal continuous MO optimization and to highlight new implications and promising research directions for the design of benchmark suites, the discovery of MO landscape features, the development of new MO (or even SO) optimization algorithms, and performance indicators. For all these topics, we provide a review of ideas and methods but also an outlook on future challenges, research potential and perspectives that result from recent developments."}],"status":"public","publication":"Computers & Operations Research","type":"journal_article","keyword":["Multimodal optimization","Multi-objective continuous optimization","Landscape analysis","Visualization","Benchmarking","Theory","Algorithms"],"language":[{"iso":"eng"}],"_id":"46318","department":[{"_id":"34"},{"_id":"819"}],"user_id":"15504","year":"2021","page":"105489","intvolume":"136","citation":{"ama":"Grimme C, Kerschke P, Aspar P, et al. Peeking beyond peaks: Challenges and research potentials of continuous multimodal multi-objective optimization. 
<i>Computers &#38; Operations Research</i>. 2021;136:105489. doi:<a href=\"https://doi.org/10.1016/j.cor.2021.105489\">https://doi.org/10.1016/j.cor.2021.105489</a>","chicago":"Grimme, Christian, Pascal Kerschke, Pelin Aspar, Heike Trautmann, Mike Preuss, André H. Deutz, Hao Wang, and Michael Emmerich. “Peeking beyond Peaks: Challenges and Research Potentials of Continuous Multimodal Multi-Objective Optimization.” <i>Computers &#38; Operations Research</i> 136 (2021): 105489. <a href=\"https://doi.org/10.1016/j.cor.2021.105489\">https://doi.org/10.1016/j.cor.2021.105489</a>.","ieee":"C. Grimme <i>et al.</i>, “Peeking beyond peaks: Challenges and research potentials of continuous multimodal multi-objective optimization,” <i>Computers &#38; Operations Research</i>, vol. 136, p. 105489, 2021, doi: <a href=\"https://doi.org/10.1016/j.cor.2021.105489\">https://doi.org/10.1016/j.cor.2021.105489</a>.","short":"C. Grimme, P. Kerschke, P. Aspar, H. Trautmann, M. Preuss, A.H. Deutz, H. Wang, M. Emmerich, Computers &#38; Operations Research 136 (2021) 105489.","mla":"Grimme, Christian, et al. “Peeking beyond Peaks: Challenges and Research Potentials of Continuous Multimodal Multi-Objective Optimization.” <i>Computers &#38; Operations Research</i>, vol. 136, 2021, p. 105489, doi:<a href=\"https://doi.org/10.1016/j.cor.2021.105489\">https://doi.org/10.1016/j.cor.2021.105489</a>.","bibtex":"@article{Grimme_Kerschke_Aspar_Trautmann_Preuss_Deutz_Wang_Emmerich_2021, title={Peeking beyond peaks: Challenges and research potentials of continuous multimodal multi-objective optimization}, volume={136}, DOI={<a href=\"https://doi.org/10.1016/j.cor.2021.105489\">https://doi.org/10.1016/j.cor.2021.105489</a>}, journal={Computers &#38; Operations Research}, author={Grimme, Christian and Kerschke, Pascal and Aspar, Pelin and Trautmann, Heike and Preuss, Mike and Deutz, André H. 
and Wang, Hao and Emmerich, Michael}, year={2021}, pages={105489} }","apa":"Grimme, C., Kerschke, P., Aspar, P., Trautmann, H., Preuss, M., Deutz, A. H., Wang, H., &#38; Emmerich, M. (2021). Peeking beyond peaks: Challenges and research potentials of continuous multimodal multi-objective optimization. <i>Computers &#38; Operations Research</i>, <i>136</i>, 105489. <a href=\"https://doi.org/10.1016/j.cor.2021.105489\">https://doi.org/10.1016/j.cor.2021.105489</a>"},"publication_identifier":{"issn":["0305-0548"]},"title":"Peeking beyond peaks: Challenges and research potentials of continuous multimodal multi-objective optimization","doi":"10.1016/j.cor.2021.105489","date_updated":"2023-10-16T12:58:42Z","volume":136,"date_created":"2023-08-04T07:28:34Z","author":[{"first_name":"Christian","full_name":"Grimme, Christian","last_name":"Grimme"},{"first_name":"Pascal","full_name":"Kerschke, Pascal","last_name":"Kerschke"},{"first_name":"Pelin","full_name":"Aspar, Pelin","last_name":"Aspar"},{"full_name":"Trautmann, Heike","id":"100740","last_name":"Trautmann","orcid":"0000-0002-9788-8282","first_name":"Heike"},{"full_name":"Preuss, Mike","last_name":"Preuss","first_name":"Mike"},{"last_name":"Deutz","full_name":"Deutz, André H.","first_name":"André H."},{"first_name":"Hao","full_name":"Wang, Hao","last_name":"Wang"},{"first_name":"Michael","full_name":"Emmerich, Michael","last_name":"Emmerich"}]},{"department":[{"_id":"819"}],"user_id":"102979","_id":"48849","extern":"1","type":"conference","status":"public","author":[{"last_name":"Bossek","orcid":"0000-0002-4121-4668","id":"102979","full_name":"Bossek, Jakob","first_name":"Jakob"},{"full_name":"Doerr, Carola","last_name":"Doerr","first_name":"Carola"},{"last_name":"Kerschke","full_name":"Kerschke, Pascal","first_name":"Pascal"},{"last_name":"Neumann","full_name":"Neumann, Aneta","first_name":"Aneta"},{"last_name":"Neumann","full_name":"Neumann, 
Frank","first_name":"Frank"}],"date_updated":"2023-12-13T10:43:53Z","doi":"10.1007/978-3-030-58112-1_8","publication_identifier":{"isbn":["978-3-030-58111-4"]},"publication_status":"published","page":"111–124","citation":{"ama":"Bossek J, Doerr C, Kerschke P, Neumann A, Neumann F. Evolving Sampling Strategies for One-Shot Optimization Tasks. In: <i>Parallel Problem Solving from Nature (PPSN XVI)</i>. Springer-Verlag; 2020:111–124. doi:<a href=\"https://doi.org/10.1007/978-3-030-58112-1_8\">10.1007/978-3-030-58112-1_8</a>","chicago":"Bossek, Jakob, Carola Doerr, Pascal Kerschke, Aneta Neumann, and Frank Neumann. “Evolving Sampling Strategies for One-Shot Optimization Tasks.” In <i>Parallel Problem Solving from Nature (PPSN XVI)</i>, 111–124. Berlin, Heidelberg: Springer-Verlag, 2020. <a href=\"https://doi.org/10.1007/978-3-030-58112-1_8\">https://doi.org/10.1007/978-3-030-58112-1_8</a>.","ieee":"J. Bossek, C. Doerr, P. Kerschke, A. Neumann, and F. Neumann, “Evolving Sampling Strategies for One-Shot Optimization Tasks,” in <i>Parallel Problem Solving from Nature (PPSN XVI)</i>, 2020, pp. 111–124, doi: <a href=\"https://doi.org/10.1007/978-3-030-58112-1_8\">10.1007/978-3-030-58112-1_8</a>.","short":"J. Bossek, C. Doerr, P. Kerschke, A. Neumann, F. Neumann, in: Parallel Problem Solving from Nature (PPSN XVI), Springer-Verlag, Berlin, Heidelberg, 2020, pp. 111–124.","mla":"Bossek, Jakob, et al. “Evolving Sampling Strategies for One-Shot Optimization Tasks.” <i>Parallel Problem Solving from Nature (PPSN XVI)</i>, Springer-Verlag, 2020, pp. 
111–124, doi:<a href=\"https://doi.org/10.1007/978-3-030-58112-1_8\">10.1007/978-3-030-58112-1_8</a>.","bibtex":"@inproceedings{Bossek_Doerr_Kerschke_Neumann_Neumann_2020, place={Berlin, Heidelberg}, title={Evolving Sampling Strategies for One-Shot Optimization Tasks}, DOI={<a href=\"https://doi.org/10.1007/978-3-030-58112-1_8\">10.1007/978-3-030-58112-1_8</a>}, booktitle={Parallel Problem Solving from Nature (PPSN XVI)}, publisher={Springer-Verlag}, author={Bossek, Jakob and Doerr, Carola and Kerschke, Pascal and Neumann, Aneta and Neumann, Frank}, year={2020}, pages={111–124} }","apa":"Bossek, J., Doerr, C., Kerschke, P., Neumann, A., &#38; Neumann, F. (2020). Evolving Sampling Strategies for One-Shot Optimization Tasks. <i>Parallel Problem Solving from Nature (PPSN XVI)</i>, 111–124. <a href=\"https://doi.org/10.1007/978-3-030-58112-1_8\">https://doi.org/10.1007/978-3-030-58112-1_8</a>"},"place":"Berlin, Heidelberg","language":[{"iso":"eng"}],"keyword":["Continuous optimization","Fully parallel search","One-shot optimization","Regression","Surrogate-assisted optimization"],"publication":"Parallel Problem Solving from Nature (PPSN XVI)","abstract":[{"text":"One-shot optimization tasks require to determine the set of solution candidates prior to their evaluation, i.e., without possibility for adaptive sampling. We consider two variants, classic one-shot optimization (where our aim is to find at least one solution of high quality) and one-shot regression (where the goal is to fit a model that resembles the true problem as well as possible). For both tasks it seems intuitive that well-distributed samples should perform better than uniform or grid-based samples, since they show a better coverage of the decision space. In practice, quasi-random designs such as Latin Hypercube Samples and low-discrepancy point sets are indeed very commonly used designs for one-shot optimization tasks. 
We study in this work how well low star discrepancy correlates with performance in one-shot optimization. Our results confirm an advantage of low-discrepancy designs, but also indicate the correlation between discrepancy values and overall performance is rather weak. We then demonstrate that commonly used designs may be far from optimal. More precisely, we evolve 24 very specific designs that each achieve good performance on one of our benchmark problems. Interestingly, we find that these specifically designed samples yield surprisingly good performance across the whole benchmark set. Our results therefore give strong indication that significant performance gains over state-of-the-art one-shot sampling techniques are possible, and that evolutionary algorithms can be an efficient means to evolve these.","lang":"eng"}],"date_created":"2023-11-14T15:58:53Z","publisher":"Springer-Verlag","title":"Evolving Sampling Strategies for One-Shot Optimization Tasks","year":"2020"}]
