@article{63710,
  author       = {Cenikj, Gjorgjina and Petelin, Gasper and Seiler, Moritz and Cenikj, Nikola and Eftimov, Tome},
  journal      = {Swarm and Evolutionary Computation},
  pages        = {101894},
  title        = {Landscape Features in Single-Objective Continuous Optimization: Have We Hit a Wall in Algorithm Selection Generalization?},
  doi          = {10.1016/j.swevo.2025.101894},
  volume       = {94},
  year         = {2025},
}

@article{60220,
  author       = {Seiler, Moritz and Kerschke, Pascal and Trautmann, Heike},
  journal      = {Evolutionary Computation},
  pages        = {1--27},
  title        = {{Deep-ELA}: Deep Exploratory Landscape Analysis with Self-Supervised Pretrained Transformers for Single- and Multi-Objective Continuous Optimization Problems},
  doi          = {10.1162/evco_a_00367},
  year         = {2025},
}

@inproceedings{60813,
  author       = {Seiler, Moritz and Preuß, Oliver Ludger and Trautmann, Heike},
  booktitle    = {Proceedings of the Genetic and Evolutionary Computation Conference, {GECCO} 2025, NH Malaga Hotel, Malaga, Spain, July 14--18, 2025},
  editor       = {Filipic, Bogdan},
  pages        = {76--84},
  publisher    = {ACM},
  title        = {{RandOptGen}: A Unified Random Problem Generator for Single- and Multi-Objective Optimization Problems with Mixed-Variable Input Spaces},
  doi          = {10.1145/3712256.3726478},
  year         = {2025},
}

@inproceedings{60814,
  author       = {Schede, Elias and Seiler, Moritz and Tierney, Kevin and Trautmann, Heike},
  booktitle    = {Proceedings of the Genetic and Evolutionary Computation Conference, {GECCO} 2025, NH Malaga Hotel, Malaga, Spain, July 14--18, 2025},
  editor       = {Filipic, Bogdan},
  pages        = {1190--1198},
  publisher    = {ACM},
  title        = {Deep Reinforcement Learning for Instance-Specific Algorithm Configuration},
  note         = {{GECCO} Best Paper Award},
  doi          = {10.1145/3712256.3726480},
  year         = {2025},
}

@inproceedings{52749,
  author       = {Seiler, Moritz and Rook, Jeroen and Heins, Jonathan and Preuß, Oliver Ludger and Bossek, Jakob and Trautmann, Heike},
  booktitle    = {2023 {IEEE} Symposium Series on Computational Intelligence ({SSCI})},
  publisher    = {IEEE},
  title        = {Using Reinforcement Learning for Per-Instance Algorithm Configuration on the {TSP}},
  doi          = {10.1109/SSCI52147.2023.10372008},
  year         = {2023},
  internal-note = {NOTE(review): likely duplicate of entry 48898 (identical title and DOI); that entry gives year 2023 and pages 361--368 -- verify and merge},
}

@inproceedings{58335,
  author       = {Seiler, Moritz and Skvorc, Urban and Doerr, Carola and Trautmann, Heike},
  booktitle    = {Learning and Intelligent Optimization - 18th International Conference, {LION} 18, Ischia Island, Italy, June 9--13, 2024, Revised Selected Papers},
  editor       = {Festa, Paola and Ferone, Daniele and Pastore, Tommaso and Pisacane, Ornella},
  pages        = {361--376},
  publisher    = {Springer},
  series       = {Lecture Notes in Computer Science},
  title        = {Synergies of Deep and Classical Exploratory Landscape Features for Automated Algorithm Selection},
  doi          = {10.1007/978-3-031-75623-8_29},
  volume       = {14990},
  year         = {2024},
}

@inproceedings{60132,
  author       = {Seiler, Moritz and Skvorc, Urban and Cenikj, Gjorgjina and Doerr, Carola and Trautmann, Heike},
  booktitle    = {Parallel Problem Solving from Nature - {PPSN} {XVIII} - 18th International Conference, {PPSN} 2024, Hagenberg, Austria, September 14--18, 2024, Proceedings, Part {II}},
  editor       = {Affenzeller, Michael and Winkler, Stephan M. and Kononova, Anna V. and Trautmann, Heike and Tusar, Tea and Machado, Penousal and Bäck, Thomas},
  pages        = {137--153},
  publisher    = {Springer},
  series       = {Lecture Notes in Computer Science},
  title        = {Learned Features vs. Classical {ELA} on Affine {BBOB} Functions},
  doi          = {10.1007/978-3-031-70068-2_9},
  volume       = {15149},
  year         = {2024},
}

@article{46310,
  abstract     = {Classic automated algorithm selection (AS) for (combinatorial) optimization problems heavily relies on so-called instance features, i.e., numerical characteristics of the problem at hand ideally extracted with computationally low-demanding routines. For the traveling salesperson problem (TSP) a plethora of features have been suggested. Most of these features are, if at all, only normalized imprecisely raising the issue of feature values being strongly affected by the instance size. Such artifacts may have detrimental effects on algorithm selection models. We propose a normalization for two feature groups which stood out in multiple AS studies on the TSP: (a) features based on a minimum spanning tree (MST) and (b) nearest neighbor relationships of the input instance. To this end we theoretically derive minimum and maximum values for properties of MSTs and k-nearest neighbor graphs (NNG) of Euclidean graphs. We analyze the differences in feature space between normalized versions of these features and their unnormalized counterparts. Our empirical investigations on various TSP benchmark sets point out that the feature scaling succeeds in eliminating the effect of the instance size. A proof-of-concept AS-study shows promising results: models trained with normalized features tend to outperform those trained with the respective vanilla features.},
  author       = {Heins, Jonathan and Bossek, Jakob and Pohl, Janina and Seiler, Moritz and Trautmann, Heike and Kerschke, Pascal},
  issn         = {0304-3975},
  journal      = {Theoretical Computer Science},
  keywords     = {Feature normalization, Algorithm selection, Traveling salesperson problem},
  pages        = {123--145},
  title        = {A Study on the Effects of Normalized {TSP} Features for Automated Algorithm Selection},
  doi          = {10.1016/j.tcs.2022.10.019},
  volume       = {940},
  year         = {2023},
}

@inproceedings{48898,
  abstract     = {Automated Algorithm Configuration (AAC) usually takes a global perspective: it identifies a parameter configuration for an (optimization) algorithm that maximizes a performance metric over a set of instances. However, the optimal choice of parameters strongly depends on the instance at hand and should thus be calculated on a per-instance basis. We explore the potential of Per-Instance Algorithm Configuration (PIAC) by using Reinforcement Learning (RL). To this end, we propose a novel PIAC approach that is based on deep neural networks. We apply it to predict configurations for the Lin\textendash Kernighan heuristic (LKH) for the Traveling Salesperson Problem (TSP) individually for every single instance. To train our PIAC approach, we create a large set of 100000 TSP instances with 2000 nodes each \textemdash currently the largest benchmark set to the best of our knowledge. We compare our approach to the state-of-the-art AAC method Sequential Model-based Algorithm Configuration (SMAC). The results show that our PIAC approach outperforms this baseline on both the newly created instance set and established instance sets.},
  author       = {Seiler, Moritz and Rook, Jeroen and Heins, Jonathan and Preuß, Oliver Ludger and Bossek, Jakob and Trautmann, Heike},
  booktitle    = {2023 {IEEE} Symposium Series on Computational Intelligence ({SSCI})},
  publisher    = {IEEE},
  pages        = {361--368},
  title        = {Using Reinforcement Learning for Per-Instance Algorithm Configuration on the {TSP}},
  doi          = {10.1109/SSCI52147.2023.10372008},
  year         = {2023},
  internal-note = {NOTE(review): likely duplicate of entry 52749 (identical title and DOI); verify and merge},
}

@inproceedings{46307,
  abstract     = {Exploratory Landscape Analysis is a powerful technique for numerically characterizing landscapes of single-objective continuous optimization problems. Landscape insights are crucial both for problem understanding as well as for assessing benchmark set diversity and composition. Despite the irrefutable usefulness of these features, they suffer from their own ailments and downsides. Hence, in this work we provide a collection of different approaches to characterize optimization landscapes. Similar to conventional landscape features, we require a small initial sample. However, instead of computing features based on that sample, we develop alternative representations of the original sample. These range from point clouds to 2D images and, therefore, are entirely feature-free. We demonstrate and validate our devised methods on the BBOB testbed and predict, with the help of Deep Learning, the high-level, expert-based landscape properties such as the degree of multimodality and the existence of funnel structures. The quality of our approaches is on par with methods relying on the traditional landscape features. Thereby, we provide an exciting new perspective on every research area which utilizes problem information such as problem understanding and algorithm design as well as automated algorithm configuration and selection.},
  author       = {Seiler, Moritz and Prager, Raphael Patrick and Kerschke, Pascal and Trautmann, Heike},
  booktitle    = {Proceedings of the Genetic and Evolutionary Computation Conference},
  isbn         = {9781450392372},
  pages        = {657--665},
  publisher    = {Association for Computing Machinery},
  title        = {A Collection of Deep Learning-based Feature-Free Approaches for Characterizing Single-Objective Continuous Fitness Landscapes},
  doi          = {10.1145/3512290.3528834},
  year         = {2022},
}

@inproceedings{46304,
  abstract     = {In recent years, feature-based automated algorithm selection using exploratory landscape analysis has demonstrated its great potential in single-objective continuous black-box optimization. However, feature computation is problem-specific and can be costly in terms of computational resources. This paper investigates feature-free approaches that rely on state-of-the-art deep learning techniques operating on either images or point clouds. We show that point-cloud-based strategies, in particular, are highly competitive and also substantially reduce the size of the required solver portfolio. Moreover, we highlight the effect and importance of cost-sensitive learning in automated algorithm selection models.},
  author       = {Prager, Raphael Patrick and Seiler, Moritz and Trautmann, Heike and Kerschke, Pascal},
  booktitle    = {Parallel Problem Solving from Nature — {PPSN} {XVII}},
  editor       = {Rudolph, Günter and Kononova, Anna V. and Aguirre, Hernán and Kerschke, Pascal and Ochoa, Gabriela and Tušar, Tea},
  isbn         = {978-3-031-14714-2},
  pages        = {3--17},
  publisher    = {Springer International Publishing},
  title        = {Automated Algorithm Selection in Single-Objective Continuous Optimization: A Comparative Study of Deep Learning and Landscape Analysis Methods},
  doi          = {10.1007/978-3-031-14714-2_1},
  year         = {2022},
}

@inproceedings{46303,
  abstract     = {Social media platforms are essential for information sharing and, thus, prone to coordinated dis- and misinformation campaigns. Nevertheless, research in this area is hampered by strict data sharing regulations imposed by the platforms, resulting in a lack of benchmark data. Previous work focused on circumventing these rules by either pseudonymizing the data or sharing fragments. In this work, we will address the benchmarking crisis by presenting a methodology that can be used to create artificial campaigns out of original campaign building blocks. We conduct a proof-of-concept study using the freely available generative language model GPT-Neo in this context and demonstrate that the campaign patterns can flexibly be adapted to an underlying social media stream and evade state-of-the-art campaign detection approaches based on stream clustering. Thus, we not only provide a framework for artificial benchmark generation but also demonstrate the possible adversarial nature of such benchmarks for challenging and advancing current campaign detection methods.},
  author       = {Pohl, Janina Susanne and Assenmacher, Dennis and Seiler, Moritz and Trautmann, Heike and Grimme, Christian},
  booktitle    = {Workshop Proceedings of the 16th International Conference on Web and Social Media ({ICWSM})},
  editor       = {{Association for the Advancement of Artificial Intelligence (AAAI)}},
  pages        = {1--10},
  publisher    = {AAAI Press},
  title        = {Artificial Social Media Campaign Creation for Benchmarking and Challenging Detection Approaches},
  doi          = {10.36190/2022.91},
  year         = {2022},
}

@inproceedings{46315,
  abstract     = {We propose a novel method for automated algorithm selection in the domain of single-objective continuous black-box optimization. In contrast to existing methods, we use convolutional neural networks as the selection apparatus which bases its decision on a so-called ‘fitness map’. This fitness map is a 2D representation of a two dimensional search space where different gray scales indicate the quality of found solutions in certain areas. Our devised approach uses a modular CMA-ES framework which offers the option to create the conventional CMA-ES, CMA-ES with the alternate step-size adaptation and many other variants proposed over the years. In total, 4 608 different configurations are possible where most configurations are of complementary nature. In this proof-of-concept work, we consider a subset of 32 possible configurations. The developed method is evaluated against an excerpt of BBOB functions and its performance is compared against baselines that are commonly used in automated algorithm selection - the best standalone algorithm (configuration) and the best obtainable sequence of configurations. While the results indicate that the use of the fitness map is not superior on every benchmark problem, it indubitably shows its merit on more hard-to-solve problems. This offers a promising perspective for generalizing to other types of optimization problems and problem domains.},
  author       = {Prager, Raphael Patrick and Seiler, Moritz and Trautmann, Heike and Kerschke, Pascal},
  booktitle    = {2021 {IEEE} Symposium Series on Computational Intelligence ({SSCI})},
  pages        = {1--8},
  title        = {Towards Feature-Free Automated Algorithm Selection for Single-Objective Continuous Black-Box Optimization},
  doi          = {10.1109/SSCI50451.2021.9660174},
  year         = {2021},
}

@inproceedings{46312,
  abstract     = {Abuse and hate are penetrating social media and many comment sections of news media companies. These platform providers invest considerable efforts to moderate user-generated contributions to prevent losing readers who get appalled by inappropriate texts. This is further enforced by legislative actions, which make non-clearance of these comments a punishable action. While (semi-)automated solutions using Natural Language Processing and advanced Machine Learning techniques are getting increasingly sophisticated, the domain of abusive language detection still struggles as large non-English and well-curated datasets are scarce or not publicly available. With this work, we publish and analyse the largest annotated German abusive language comment datasets to date. In contrast to existing datasets, we achieve a high labelling standard by conducting a thorough crowd-based annotation study that complements professional moderators’ decisions, which are also included in the dataset. We compare and cross-evaluate the performance of baseline algorithms and state-of-the-art transformer-based language models, which are fine-tuned on our datasets and an existing alternative, showing the usefulness for the community.},
  author       = {Assenmacher, Dennis and Niemann, Marco and Müller, Kilian and Seiler, Moritz and Riehle, Dennis M. and Trautmann, Heike},
  booktitle    = {Proceedings of the Neural Information Processing Systems Track on Datasets and Benchmarks 1 ({NeurIPS} Datasets and Benchmarks 2021)},
  pages        = {1--14},
  title        = {{RP-Mod} \& {RP-Crowd}: Moderator- and Crowd-Annotated German News Comment Datasets},
  year         = {2021},
}

@inproceedings{46313,
  abstract     = {Classic automated algorithm selection (AS) for (combinatorial) optimization problems heavily relies on so-called instance features, i.e., numerical characteristics of the problem at hand ideally extracted with computationally low-demanding routines. For the traveling salesperson problem (TSP) a plethora of features have been suggested. Most of these features are, if at all, only normalized imprecisely raising the issue of feature values being strongly affected by the instance size. Such artifacts may have detrimental effects on algorithm selection models. We propose a normalization for two feature groups which stood out in multiple AS studies on the TSP: (a) features based on a minimum spanning tree (MST) and (b) a k-nearest neighbor graph (NNG) transformation of the input instance. To this end we theoretically derive minimum and maximum values for properties of MSTs and k-NNGs of Euclidean graphs. We analyze the differences in feature space between normalized versions of these features and their unnormalized counterparts. Our empirical investigations on various TSP benchmark sets point out that the feature scaling succeeds in eliminating the effect of the instance size. Eventually, a proof-of-concept AS-study shows promising results: models trained with normalized features tend to outperform those trained with the respective vanilla features.},
  author       = {Heins, Jonathan and Bossek, Jakob and Pohl, Janina and Seiler, Moritz and Trautmann, Heike and Kerschke, Pascal},
  booktitle    = {Proceedings of the 16th {ACM/SIGEVO} Conference on Foundations of Genetic Algorithms ({FOGA} {XVI})},
  editor       = {{Association for Computing Machinery}},
  pages        = {1--15},
  publisher    = {Association for Computing Machinery},
  title        = {On the Potential of Normalized {TSP} Features for Automated Algorithm Selection},
  doi          = {10.1145/3450218.3477308},
  year         = {2021},
}

@inproceedings{46331,
  abstract     = {Artificial neural networks in general and deep learning networks in particular established themselves as popular and powerful machine learning algorithms. While the often tremendous sizes of these networks are beneficial when solving complex tasks, the tremendous number of parameters also causes such networks to be vulnerable to malicious behavior such as adversarial perturbations. These perturbations can change a model's classification decision. Moreover, while single-step adversaries can easily be transferred from network to network, the transfer of more powerful multi-step adversaries has - usually - been rather difficult. In this work, we introduce a method for generating strong adversaries that can easily (and frequently) be transferred between different models. This method is then used to generate a large set of adversaries, based on which the effects of selected defense methods are experimentally assessed. At last, we introduce a novel, simple, yet effective approach to enhance the resilience of neural networks against adversaries and benchmark it against established defense methods. In contrast to the already existing methods, our proposed defense approach is much more efficient as it only requires a single additional forward-pass to achieve comparable performance results.},
  author       = {Seiler, Moritz and Trautmann, Heike and Kerschke, Pascal},
  booktitle    = {Proceedings of the International Joint Conference on Neural Networks ({IJCNN})},
  pages        = {1--8},
  title        = {Enhancing Resilience of Deep Learning Networks By Means of Transferable Adversaries},
  doi          = {10.1109/IJCNN48605.2020.9207338},
  year         = {2020},
}

@inproceedings{46330,
  abstract     = {In this work we focus on the well-known Euclidean Traveling Salesperson Problem (TSP) and two highly competitive inexact heuristic TSP solvers, EAX and LKH, in the context of per-instance algorithm selection (AS). We evolve instances with 1000 nodes where the solvers show strongly different performance profiles. These instances serve as a basis for an exploratory study on the identification of well-discriminating problem characteristics (features). Our results in a nutshell: we show that even though (1) promising features exist, (2) these are in line with previous results from the literature, and (3) models trained with these features are more accurate than models adopting sophisticated feature selection methods, the advantage is not close to the virtual best solver in terms of penalized average runtime and so is the performance gain over the single best solver. However, we show that a feature-free deep neural network based approach solely based on visual representation of the instances already matches classical AS model results and thus shows huge potential for future studies.},
  author       = {Seiler, Moritz and Pohl, Janina and Bossek, Jakob and Kerschke, Pascal and Trautmann, Heike},
  booktitle    = {Proceedings of the 16th International Conference on Parallel Problem Solving from Nature ({PPSN} {XVI})},
  editor       = {Bäck, Thomas and Preuss, Mike and Deutz, André and Wang, Hao and Doerr, Carola and Emmerich, Michael and Trautmann, Heike},
  pages        = {48--64},
  title        = {Deep Learning as a Competitive Feature-Free Approach for Automated Algorithm Selection on the Traveling Salesperson Problem},
  doi          = {10.1007/978-3-030-58112-1_4},
  year         = {2020},
}

