@article{46318,
  abstract     = {{Multi-objective (MO) optimization, i.e., the simultaneous optimization of multiple conflicting objectives, is gaining more and more attention in various research areas, such as evolutionary computation, machine learning (e.g., (hyper-)parameter optimization), or logistics (e.g., vehicle routing). Many works in this domain mention the structural problem property of multimodality as a challenge from two classical perspectives: (1) finding all globally optimal solution sets, and (2) avoiding to get trapped in local optima. Interestingly, these streams seem to transfer many traditional concepts of single-objective (SO) optimization into claims, assumptions, or even terminology regarding the MO domain, but mostly neglect the understanding of the structural properties as well as the algorithmic search behavior on a problem’s landscape. However, some recent works counteract this trend, by investigating the fundamentals and characteristics of MO problems using new visualization techniques and gaining surprising insights. Using these visual insights, this work proposes a step towards a unified terminology to capture multimodality and locality in a broader way than it is usually done. This enables us to investigate current research activities in multimodal continuous MO optimization and to highlight new implications and promising research directions for the design of benchmark suites, the discovery of MO landscape features, the development of new MO (or even SO) optimization algorithms, and performance indicators. For all these topics, we provide a review of ideas and methods but also an outlook on future challenges, research potential and perspectives that result from recent developments.}},
  author       = {{Grimme, Christian and Kerschke, Pascal and Aspar, Pelin and Trautmann, Heike and Preuss, Mike and Deutz, André H. and Wang, Hao and Emmerich, Michael}},
  issn         = {{0305-0548}},
  journal      = {{Computers \& Operations Research}},
  keywords     = {{Multimodal optimization, Multi-objective continuous optimization, Landscape analysis, Visualization, Benchmarking, Theory, Algorithms}},
  pages        = {{105489}},
  title        = {{{Peeking beyond peaks: Challenges and research potentials of continuous multimodal multi-objective optimization}}},
  doi          = {{10.1016/j.cor.2021.105489}},
  volume       = {{136}},
  year         = {{2021}},
}

@inproceedings{48853,
  abstract     = {{In practise, it is often desirable to provide the decision-maker with a rich set of diverse solutions of decent quality instead of just a single solution. In this paper we study evolutionary diversity optimization for the knapsack problem (KP). Our goal is to evolve a population of solutions that all have a profit of at least (1 - {$ϵ$}) {$\cdot$} OPT, where OPT is the value of an optimal solution. Furthermore, they should differ in structure with respect to an entropy-based diversity measure. To this end we propose a simple ({$\mu$} + 1)-EA with initial approximate solutions calculated by a well-known FPTAS for the KP. We investigate the effect of different standard mutation operators and introduce biased mutation and crossover which puts strong probability on flipping bits of low and/or high frequency within the population. An experimental study on different instances and settings shows that the proposed mutation operators in most cases perform slightly inferior in the long term, but show strong benefits if the number of function evaluations is severely limited.}},
  author       = {{Bossek, Jakob and Neumann, Aneta and Neumann, Frank}},
  booktitle    = {{Proceedings of the Genetic and Evolutionary Computation Conference}},
  isbn         = {{978-1-4503-8350-9}},
  keywords     = {{evolutionary algorithms, evolutionary diversity optimization, knapsack problem, tailored operators}},
  pages        = {{556--564}},
  publisher    = {{Association for Computing Machinery}},
  title        = {{{Breeding Diverse Packings for the Knapsack Problem by Means of Diversity-Tailored Evolutionary Algorithms}}},
  doi          = {{10.1145/3449639.3459364}},
  year         = {{2021}},
}

@inproceedings{48860,
  abstract     = {{In the area of evolutionary computation the calculation of diverse sets of high-quality solutions to a given optimization problem has gained momentum in recent years under the term evolutionary diversity optimization. Theoretical insights into the working principles of baseline evolutionary algorithms for diversity optimization are still rare. In this paper we study the well-known Minimum Spanning Tree problem (MST) in the context of diversity optimization where population diversity is measured by the sum of pairwise edge overlaps. Theoretical results provide insights into the fitness landscape of the MST diversity optimization problem pointing out that even for a population of {$\mu$} = 2 fitness plateaus (of constant length) can be reached, but nevertheless diverse sets can be calculated in polynomial time. We supplement our theoretical results with a series of experiments for the unconstrained and constraint case where all solutions need to fulfill a minimal quality threshold. Our results show that a simple ({$\mu$} + 1)-EA can effectively compute a diversified population of spanning trees of high quality.}},
  author       = {{Bossek, Jakob and Neumann, Frank}},
  booktitle    = {{Proceedings of the Genetic and Evolutionary Computation Conference}},
  isbn         = {{978-1-4503-8350-9}},
  keywords     = {{evolutionary algorithms, evolutionary diversity optimization, minimum spanning tree, runtime analysis}},
  pages        = {{198--206}},
  publisher    = {{Association for Computing Machinery}},
  title        = {{{Evolutionary Diversity Optimization and the Minimum Spanning Tree Problem}}},
  doi          = {{10.1145/3449639.3459363}},
  year         = {{2021}},
}

@article{48854,
  abstract     = {{We contribute to the theoretical understanding of randomized search heuristics for dynamic problems. We consider the classical vertex coloring problem on graphs and investigate the dynamic setting where edges are added to the current graph. We then analyze the expected time for randomized search heuristics to recompute high quality solutions. The (1+1) Evolutionary Algorithm and RLS operate in a setting where the number of colors is bounded and we are minimizing the number of conflicts. Iterated local search algorithms use an unbounded color palette and aim to use the smallest colors and, consequently, the smallest number of colors. We identify classes of bipartite graphs where reoptimization is as hard as or even harder than optimization from scratch, i.e., starting with a random initialization. Even adding a single edge can lead to hard symmetry problems. However, graph classes that are hard for one algorithm turn out to be easy for others. In most cases our bounds show that reoptimization is faster than optimizing from scratch. We further show that tailoring mutation operators to parts of the graph where changes have occurred can significantly reduce the expected reoptimization time. In most settings the expected reoptimization time for such tailored algorithms is linear in the number of added edges. However, tailored algorithms cannot prevent exponential times in settings where the original algorithm is inefficient.}},
  author       = {{Bossek, Jakob and Neumann, Frank and Peng, Pan and Sudholt, Dirk}},
  issn         = {{0178-4617}},
  journal      = {{Algorithmica}},
  keywords     = {{Dynamic optimization, Evolutionary algorithms, Running time analysis}},
  number       = {{10}},
  pages        = {{3148--3179}},
  title        = {{{Time Complexity Analysis of Randomized Search Heuristics for the Dynamic Graph Coloring Problem}}},
  doi          = {{10.1007/s00453-021-00838-3}},
  volume       = {{83}},
  year         = {{2021}},
}

@techreport{37136,
  abstract     = {{This study examines the relation between voluntary audit and the cost of debt in private firms. We use a sample of 4,058 small private firms operating in the period 2006‐2017 that are not subject to mandatory audits. Firms decide for a voluntary audit of financial statements either because the economic setting in which they operate effectively forces them to do so (e.g., ownership complexity, export‐oriented supply chain, subsidiary status) or because firm fundamentals and/or financial reporting practices limit their access to financial debt, both reflected in earnings quality. We use these factors to model the decision for voluntary audit. In the outcome analyses, we find robust evidence that voluntary audits are associated with higher, rather than lower, interest rate by up to 3.0 percentage points. This effect is present regardless of the perceived audit quality (Big‐4 vs. non‐Big‐4), but is stronger for non‐Big‐4 audits where auditees have a stronger position relative to auditors. Audited firms’ earnings are less informative about future operating performance relative to unaudited counterparts. We conclude that voluntary audits facilitate access to financial debt for firms with higher risk that may otherwise have no access to this form of financing. The price paid is reflected in higher interest rates charged to firms with voluntary audits – firms with higher information and/or fundamental risk.}},
  author       = {{Ichev, Riste and Koren, Jernej and Kosi, Urska and Sitar Sustar, Katarina and Valentincic, Aljosa}},
  keywords     = {{private firms, voluntary audit, cost of debt, self-selection bias, risk}},
  title        = {{{Cost of Debt for Private Firms Revisited: Voluntary Audits as a Reflection of Risk}}},
  year         = {{2021}},
}

@article{21436,
  abstract     = {{Ultrasonic wire bonding is a solid-state joining process, used in the electronics industry to form electrical connections, e.g. to connect electrical terminals within semiconductor modules. Many process parameters affect the bond strength, such like the bond normal force, ultrasonic power, wire material and bonding frequency. Today, process design, development, and optimization is most likely based on the knowledge of process engineers and is mainly performed by experimental testing. In this contribution, a newly developed simulation tool is presented, to reduce time and costs and efficiently determine optimized process parameter. Based on a co-simulation of MATLAB and ANSYS, the different physical phenomena of the wire bonding process are considered using finite element simulation for the complex plastic deformation of the wire and reduced order models for the transient dynamics of the transducer, wire, substrate and bond formation. The model parameters such as the coefficients of friction between bond tool and wire and between wire and substrate were determined for aluminium and copper wire in experiments with a test rig specially developed for the requirements of heavy wire bonding. To reduce simulation time, for the finite element simulation a restart analysis and high performance computing is utilized. Detailed analysis of the bond formation showed, that the normal pressure distribution in the contact between wire and substrate has high impact on bond formation and distribution of welded areas in the contact area.}},
  author       = {{Schemmel, Reinhard and Krieger, Viktor and Hemsel, Tobias and Sextro, Walter}},
  issn         = {{0026-2714}},
  journal      = {{Microelectronics Reliability}},
  keywords     = {{Ultrasonic heavy wire bonding, Co-simulation, ANSYS, MATLAB, Process optimization, Friction coefficient, Copper-copper, Aluminium-copper}},
  pages        = {{114077}},
  title        = {{{Co-simulation of MATLAB and ANSYS for ultrasonic wire bonding process optimization}}},
  doi          = {{10.1016/j.microrel.2021.114077}},
  volume       = {{119}},
  year         = {{2021}},
}

@inbook{22930,
  abstract     = {{Self-piercing riveting is an established technique for joining multi-material structures in car body manufacturing. Rivets for self-piercing riveting differ in their geometry, the material used, the condition of the material and their surface condition. To shorten the manufacturing process by omitting the heat treatment and the coating process, the authors have elaborated a concept for the use of stainless steel with high strain hardening as a rivet material. The focus of the present investigation is on the evaluation of the influences of the rivet’s geometry and material on its deformation behaviour. Conventional rivets of types P and HD2, a rivet with an improved geometry made of treatable steel 38B2, and rivets made of the stainless steels 1.3815 and 1.4541 are examined. The analysis is conducted by means of multi-step joining tests for two material combinations comprising high-strength steel HCT70X and aluminium EN AW-5083. The joints are cut to provide a cross-section and the deformation behaviour of the different rivets is analysed on the basis of the measured changes in geometry and hardness. In parallel, an examination of the force-stroke curves provides further insights. It can be demonstrated that, besides the geometry, the material strength, in particular, has a significant influence on the deformation behaviour of the rivet. The strength of steel 1.4541 is seen to be too low for the joining task, while the strength of steel 1.3815 is sufficient, and hence the investigation confirms the capability of rivets made of 1.3815 for joining even challenging material combinations.}},
  author       = {{Uhe, Benedikt and Kuball, Clara-Maria and Merklein, Marion and Meschut, Gerson}},
  booktitle    = {{Forming the Future - Proceedings of the 13th International Conference on the Technology of Plasticity. The Minerals, Metals & Materials Series.}},
  editor       = {{Daehn, Glenn and Cao, Jian and Kinsey, Brad and Tekkaya, Erman and Vivek, Anupam and Yoshida, Yoshinori}},
  keywords     = {{Self-piercing riveting, Lightweight design, Deformation behaviour, Stainless steel, High nitrogen steel}},
  pages        = {{1495--1506}},
  publisher    = {{Springer}},
  title        = {{{Self-Piercing Riveting Using Rivets Made of Stainless Steel with High Strain Hardening}}},
  doi          = {{10.1007/978-3-030-75381-8_124}},
  year         = {{2021}},
}

@inproceedings{22274,
  abstract     = {{The use of high-strength steel and aluminium is rising due to the intensified efforts being made in lightweight design, and self-piercing riveting is becoming increasingly important. Conventional rivets for self-piercing riveting differ in their geometry, the material used, the condition of the material and the coating. To shorten the manufacturing process, the use of stainless steel with high strain hardening as the rivet material represents a promising approach. This allows the coating of the rivets to be omitted due to the corrosion resistance of the material and, since the strength of the stainless steel is achieved by cold forming, heat treatment is no longer required. In addition, it is possible to adjust the local strength within the rivet. Because of that, the authors have elaborated a concept for using high nitrogen steel 1.3815 as the rivet material. The present investigation focusses on the joint strength in order to evaluate the capability of rivets in high nitrogen steel by comparison to conventional rivets made of treatable steel. Due to certain challenges in the forming process of the high nitrogen steel rivets, deviations result from the targeted rivet geometry. Mainly these deviations cause a lower joint strength with these rivets, which is, however, adequate. All in all, the capability of the new rivet is proven by the results of this investigation. }},
  author       = {{Uhe, Benedikt and Kuball, Clara-Maria and Merklein, Marion and Meschut, Gerson}},
  keywords     = {{Self-piercing Riveting, Joining Technology, Rivet Geometry, Rivet Material, High Nitrogen Steel, Joint Strength}},
  location     = {{Liège, Belgien}},
  title        = {{{Strength of self-piercing riveted Joints with conventional Rivets and Rivets made of High Nitrogen Steel}}},
  doi          = {{10.25518/esaform21.1911}},
  year         = {{2021}},
}

@article{32558,
  abstract     = {{With the rapid progress of technological development, self-efficacy in reference to digital devices (i.e., information and computer technology [ICT] self-efficacy) is an important driver that helps students to deal with technological problems and support their lifelong learning processes. Schools, peers, and home learning environments are important sources for the development of positive self-efficacy. Expanding on previous research, we investigated the associations between different aspects of the digital home learning environment and students’ ICT self-efficacy. The moderation effects of gender were also tested. A total of 651 children answered a questionnaire about different digital home learning environment dimensions and estimated their ICT self-efficacy using an adapted scale—Schwarzer and Jerusalem’s (1999) general self-efficacy scale. Using the structural equation modeling technique, a digital home learning environment containing six different qualities of parental support was investigated. Families’ cultural capital, parents’ attitudes toward the Internet, and shared Internet activities at home contributed positively to ICT self-efficacy. We observed small gender differences, with the moderation effect being nonsignificant. The results help researchers and practitioners to understand how different dimensions of the digital home learning environment support ICT self-efficacy. We will discuss how parents can enhance the home learning environment and how teachers can integrate this knowledge into formal education.}},
  author       = {{Bonanati, Sabrina and Buhl, Heike M.}},
  issn         = {{1387-1579}},
  journal      = {{Learning Environments Research}},
  keywords     = {{Digital media use, Gender, Home learning environment, ICT self-efficacy, Motivation, Parental involvement}},
  number       = {{2}},
  pages        = {{485--505}},
  publisher    = {{Springer Science and Business Media LLC}},
  title        = {{{The digital home learning environment and its relation to children’s ICT self-efficacy}}},
  doi          = {{10.1007/s10984-021-09377-8}},
  volume       = {{25}},
  year         = {{2021}},
}

@inproceedings{19609,
  abstract     = {{Modern services comprise interconnected components,
e.g., microservices in a service mesh, that can scale and
run on multiple nodes across the network on demand. To process
incoming traffic, service components have to be instantiated and
traffic assigned to these instances, taking capacities and changing
demands into account. This challenge is usually solved with
custom approaches designed by experts. While this typically
works well for the considered scenario, the models often rely
on unrealistic assumptions or on knowledge that is not available
in practice (e.g., a priori knowledge).

We propose a novel deep reinforcement learning approach that
learns how to best coordinate services and is geared towards
realistic assumptions. It interacts with the network and relies on
available, possibly delayed monitoring information. Rather than
defining a complex model or an algorithm how to achieve an
objective, our model-free approach adapts to various objectives
and traffic patterns. An agent is trained offline without expert
knowledge and then applied online with minimal overhead. Compared
to a state-of-the-art heuristic, it significantly improves flow
throughput and overall network utility on real-world network
topologies and traffic traces. It also learns to optimize different
objectives, generalizes to scenarios with unseen, stochastic traffic
patterns, and scales to large real-world networks.}},
  author       = {{Schneider, Stefan Balthasar and Manzoor, Adnan and Qarawlus, Haydar and Schellenberg, Rafael and Karl, Holger and Khalili, Ramin and Hecker, Artur}},
  booktitle    = {{IEEE International Conference on Network and Service Management (CNSM)}},
  keywords     = {{self-driving networks, self-learning, network coordination, service coordination, reinforcement learning, deep learning, nfv}},
  publisher    = {{IEEE}},
  title        = {{{Self-Driving Network and Service Coordination Using Deep Reinforcement Learning}}},
  year         = {{2020}},
}

@inproceedings{17370,
  abstract     = {{ We consider a natural extension to the metric uncapacitated Facility Location Problem (FLP) in which requests ask for different commodities out of a finite set \( S \) of commodities.
  Ravi and Sinha (SODA 2004) introduced the model as the \emph{Multi-Commodity Facility Location Problem} (MFLP) and considered it an offline optimization problem.
  The model itself is similar to the FLP: i.e., requests are located at points of a finite metric space and the task of an algorithm is to construct facilities and assign requests to facilities while minimizing the construction cost and the sum over all assignment distances.
  In addition, requests and facilities are heterogeneous; they request or offer multiple commodities out of $S$.
  A request has to be connected to a set of facilities jointly offering the commodities demanded by it.
  In comparison to the FLP, an algorithm has to decide not only if and where to place facilities, but also which commodities to offer at each.

  To the best of our knowledge we are the first to study the problem in its online variant in which requests, their positions and their commodities are not known beforehand but revealed over time.
  We present results regarding the competitive ratio.
  On the one hand, we show that heterogeneity influences the competitive ratio by developing a lower bound on the competitive ratio for any randomized online algorithm of \( \Omega (  \sqrt{|S|} + \frac{\log n}{\log \log n}  ) \) that already holds for simple line metrics.
  Here, \( n \) is the number of requests.
  On the other side, we establish a deterministic \( \mathcal{O}(\sqrt{|S|} \cdot \log n) \)-competitive algorithm and a randomized \( \mathcal{O}(\sqrt{|S|} \cdot \frac{\log n}{\log \log n} ) \)-competitive algorithm.
  Further, we show that when considering a more special class of cost functions for the construction cost of a facility, the competitive ratio decreases given by our deterministic algorithm depending on the function.}},
  author       = {{Castenow, Jannik and Feldkord, Björn and Knollmann, Till and Malatyali, Manuel and Meyer auf der Heide, Friedhelm}},
  booktitle    = {{Proceedings of the 32nd ACM Symposium on Parallelism in Algorithms and Architectures}},
  isbn         = {{9781450369350}},
  keywords     = {{Online Multi-Commodity Facility Location, Competitive Ratio, Online Optimization, Facility Location Problem}},
  title        = {{{The Online Multi-Commodity Facility Location Problem}}},
  doi          = {{10.1145/3350755.3400281}},
  year         = {{2020}},
}

@article{20143,
  author       = {{Otroshi, Mortaza and Rossel, Moritz and Meschut, Gerson}},
  journal      = {{Journal of Advanced Joining Processes}},
  keywords     = {{Self-pierce riveting, Ductile fracture, Damage modeling, GISSMO damage model}},
  publisher    = {{Elsevier}},
  title        = {{{Stress state dependent damage modeling of self-pierce riveting process simulation using GISSMO damage model}}},
  doi          = {{10.1016/j.jajp.2020.100015}},
  volume       = {{1}},
  year         = {{2020}},
}

@article{35298,
  abstract     = {{Im Artikel werden drei verschiedene Lernzugänge (kompetenzorientiertes, ästhetisches und biographisches Lernen) vorgestellt und aus theoretischer Perspektive deren motivierender Gehalt für selbstreguliertes Lernen in Praxisphasen des Lehramtsstudiums herausgearbeitet. Als theoretische Grundlage dient die Selbstbestimmungstheorie als zentrale motivationale Theorie zur Erklärung selbstbestimmten Handelns.}},
  author       = {{Caruso, Carina and Adammek, Christine and Bonanati, Sabrina and Wiescholek, Sybille}},
  issn         = {{2625-0675}},
  journal      = {{Herausforderung Lehrer*innenbildung - Zeitschrift Zur Konzeption, Gestaltung Und Diskussion}},
  keywords     = {{ästhetische Forschung, Biographiearbeit, Praxissemester, Professionalisierung, selbstreguliertes Lernen, Motivation / aesthetic research, biographical work, long-term internship, professionalization, self-regulated learning, motivation}},
  number       = {{1}},
  pages        = {{18--33}},
  title        = {{{Motivierende Lernzugänge als Ausgangspunkt der Professionalisierung angehender Lehrer_innen}}},
  doi          = {{10.4119/hlz-2540}},
  volume       = {{3}},
  year         = {{2020}},
}

@inproceedings{48847,
  abstract     = {{Dynamic optimization problems have gained significant attention in evolutionary computation as evolutionary algorithms (EAs) can easily adapt to changing environments. We show that EAs can solve the graph coloring problem for bipartite graphs more efficiently by using dynamic optimization. In our approach the graph instance is given incrementally such that the EA can reoptimize its coloring when a new edge introduces a conflict. We show that, when edges are inserted in a way that preserves graph connectivity, Randomized Local Search (RLS) efficiently finds a proper 2-coloring for all bipartite graphs. This includes graphs for which RLS and other EAs need exponential expected time in a static optimization scenario. We investigate different ways of building up the graph by popular graph traversals such as breadth-first-search and depth-first-search and analyse the resulting runtime behavior. We further show that offspring populations (e. g. a (1 + {$\lambda$}) RLS) lead to an exponential speedup in {$\lambda$}. Finally, an island model using 3 islands succeeds in an optimal time of {$\Theta$}(m) on every m-edge bipartite graph, outperforming offspring populations. This is the first example where an island model guarantees a speedup that is not bounded in the number of islands.}},
  author       = {{Bossek, Jakob and Neumann, Frank and Peng, Pan and Sudholt, Dirk}},
  booktitle    = {{Proceedings of the Genetic and Evolutionary Computation Conference}},
  isbn         = {{978-1-4503-7128-5}},
  keywords     = {{dynamic optimization, evolutionary algorithms, running time analysis, theory}},
  pages        = {{1277--1285}},
  publisher    = {{Association for Computing Machinery}},
  title        = {{{More Effective Randomized Search Heuristics for Graph Coloring through Dynamic Optimization}}},
  doi          = {{10.1145/3377930.3390174}},
  year         = {{2020}},
}

@inproceedings{48849,
  abstract     = {{One-shot optimization tasks require to determine the set of solution candidates prior to their evaluation, i.e., without possibility for adaptive sampling. We consider two variants, classic one-shot optimization (where our aim is to find at least one solution of high quality) and one-shot regression (where the goal is to fit a model that resembles the true problem as well as possible). For both tasks it seems intuitive that well-distributed samples should perform better than uniform or grid-based samples, since they show a better coverage of the decision space. In practice, quasi-random designs such as Latin Hypercube Samples and low-discrepancy point sets are indeed very commonly used designs for one-shot optimization tasks. We study in this work how well low star discrepancy correlates with performance in one-shot optimization. Our results confirm an advantage of low-discrepancy designs, but also indicate the correlation between discrepancy values and overall performance is rather weak. We then demonstrate that commonly used designs may be far from optimal. More precisely, we evolve 24 very specific designs that each achieve good performance on one of our benchmark problems. Interestingly, we find that these specifically designed samples yield surprisingly good performance across the whole benchmark set. Our results therefore give strong indication that significant performance gains over state-of-the-art one-shot sampling techniques are possible, and that evolutionary algorithms can be an efficient means to evolve these.}},
  author       = {{Bossek, Jakob and Doerr, Carola and Kerschke, Pascal and Neumann, Aneta and Neumann, Frank}},
  booktitle    = {{Parallel Problem Solving from Nature (PPSN XVI)}},
  isbn         = {{978-3-030-58111-4}},
  keywords     = {{Continuous optimization, Fully parallel search, One-shot optimization, Regression, Surrogate-assisted optimization}},
  pages        = {{111--124}},
  publisher    = {{Springer-Verlag}},
  title        = {{{Evolving Sampling Strategies for One-Shot Optimization Tasks}}},
  doi          = {{10.1007/978-3-030-58112-1_8}},
  year         = {{2020}},
}

@inproceedings{48851,
  abstract     = {{Several important optimization problems in the area of vehicle routing can be seen as variants of the classical Traveling Salesperson Problem (TSP). In the area of evolutionary computation, the Traveling Thief Problem (TTP) has gained increasing interest over the last 5 years. In this paper, we investigate the effect of weights on such problems, in the sense that the cost of traveling increases with respect to the weights of nodes already visited during a tour. This provides abstractions of important TSP variants such as the Traveling Thief Problem and time dependent TSP variants, and allows to study precisely the increase in difficulty caused by weight dependence. We provide a 3.59-approximation for this weight dependent version of TSP with metric distances and bounded positive weights. Furthermore, we conduct experimental investigations for simple randomized local search with classical mutation operators and two variants of the state-of-the-art evolutionary algorithm EAX adapted to the weighted TSP. Our results show the impact of the node weights on the position of the nodes in the resulting tour.}},
  author       = {{Bossek, Jakob and Casel, Katrin and Kerschke, Pascal and Neumann, Frank}},
  booktitle    = {{Proceedings of the Genetic and Evolutionary Computation Conference}},
  isbn         = {{978-1-4503-7128-5}},
  keywords     = {{dynamic optimization, evolutionary algorithms, running time analysis, theory}},
  pages        = {{1286--1294}},
  publisher    = {{Association for Computing Machinery}},
  title        = {{{The Node Weight Dependent Traveling Salesperson Problem: Approximation Algorithms and Randomized Search Heuristics}}},
  doi          = {{10.1145/3377930.3390243}},
  year         = {{2020}},
}

@inproceedings{48845,
  abstract     = {{In practice, e.g. in delivery and service scenarios, Vehicle-Routing-Problems (VRPs) often imply repeated decision making on dynamic customer requests. As in classical VRPs, tours have to be planned short while the number of serviced customers has to be maximized at the same time resulting in a multi-objective problem. Beyond that, however, dynamic requests lead to the need for re-planning of not yet realized tour parts, while already realized tour parts are irreversible. In this paper we study this type of bi-objective dynamic VRP including sequential decision making and concurrent realization of decisions. We adopt a recently proposed Dynamic Evolutionary Multi-Objective Algorithm (DEMOA) for a related VRP problem and extend it to the more realistic (here considered) scenario of multiple vehicles. We empirically show that our DEMOA is competitive with a multi-vehicle offline and clairvoyant variant of the proposed DEMOA as well as with the dynamic single-vehicle approach proposed earlier.}},
  author       = {{Bossek, Jakob and Grimme, Christian and Trautmann, Heike}},
  booktitle    = {{Proceedings of the Genetic and Evolutionary Computation Conference}},
  isbn         = {{978-1-4503-7128-5}},
  keywords     = {{decision making, dynamic optimization, evolutionary algorithms, multi-objective optimization, vehicle routing}},
  pages        = {{166--174}},
  publisher    = {{Association for Computing Machinery}},
  title        = {{{Dynamic Bi-Objective Routing of Multiple Vehicles}}},
  doi          = {{10.1145/3377930.3390146}},
  year         = {{2020}},
}

@inproceedings{48850,
  abstract     = {{Sequential model-based optimization (SMBO) approaches are algorithms for solving problems that require computationally or otherwise expensive function evaluations. The key design principle of SMBO is a substitution of the true objective function by a surrogate, which is used to propose the point(s) to be evaluated next. SMBO algorithms are intrinsically modular, leaving the user with many important design choices. Significant research efforts go into understanding which settings perform best for which type of problems. Most works, however, focus on the choice of the model, the acquisition function, and the strategy used to optimize the latter. The choice of the initial sampling strategy, however, receives much less attention. Not surprisingly, quite diverging recommendations can be found in the literature. We analyze in this work how the size and the distribution of the initial sample influences the overall quality of the efficient global optimization (EGO) algorithm, a well-known SMBO approach. While, overall, small initial budgets using Halton sampling seem preferable, we also observe that the performance landscape is rather unstructured. We furthermore identify several situations in which EGO performs unfavorably against random sampling. Both observations indicate that an adaptive SMBO design could be beneficial, making SMBO an interesting test-bed for automated algorithm design.}},
  author       = {{Bossek, Jakob and Doerr, Carola and Kerschke, Pascal}},
  booktitle    = {{Proceedings of the Genetic and Evolutionary Computation Conference}},
  isbn         = {{978-1-4503-7128-5}},
  keywords     = {{continuous black-box optimization, design of experiments, initial design, sequential model-based optimization}},
  pages        = {{778--786}},
  publisher    = {{Association for Computing Machinery}},
  title        = {{{Initial Design Strategies and Their Effects on Sequential Model-Based Optimization: An Exploratory Case Study Based on BBOB}}},
  doi          = {{10.1145/3377930.3390155}},
  year         = {{2020}},
}

@article{48848,
  abstract     = {{We build upon a recently proposed multi-objective view onto performance measurement of single-objective stochastic solvers. The trade-off between the fraction of failed runs and the mean runtime of successful runs – both to be minimized – is directly analyzed based on a study on algorithm selection of inexact state-of-the-art solvers for the famous Traveling Salesperson Problem (TSP). Moreover, we adopt the hypervolume indicator (HV) commonly used in multi-objective optimization for simultaneously assessing both conflicting objectives and investigate relations to commonly used performance indicators, both theoretically and empirically. Next to Penalized Average Runtime (PAR) and Penalized Quantile Runtime (PQR), the HV measure is used as a core concept within the construction of per-instance algorithm selection models offering interesting insights into complementary behavior of inexact TSP solvers.}},
  author       = {{Bossek, Jakob and Kerschke, Pascal and Trautmann, Heike}},
  issn         = {{1568-4946}},
  journal      = {{Applied Soft Computing}},
  keywords     = {{Algorithm selection, Combinatorial optimization, Multi-objective optimization, Performance measurement, Traveling Salesperson Problem}},
  pages        = {{105901}},
  title        = {{{A Multi-Objective Perspective on Performance Assessment and Automated Selection of Single-Objective Optimization Algorithms}}},
  doi          = {{10.1016/j.asoc.2019.105901}},
  volume       = {{88}},
  year         = {{2020}},
}

@article{46334,
  abstract     = {{We build upon a recently proposed multi-objective view onto performance measurement of single-objective stochastic solvers. The trade-off between the fraction of failed runs and the mean runtime of successful runs – both to be minimized – is directly analyzed based on a study on algorithm selection of inexact state-of-the-art solvers for the famous Traveling Salesperson Problem (TSP). Moreover, we adopt the hypervolume indicator (HV) commonly used in multi-objective optimization for simultaneously assessing both conflicting objectives and investigate relations to commonly used performance indicators, both theoretically and empirically. Next to Penalized Average Runtime (PAR) and Penalized Quantile Runtime (PQR), the HV measure is used as a core concept within the construction of per-instance algorithm selection models offering interesting insights into complementary behavior of inexact TSP solvers.}},
  author       = {{Bossek, Jakob and Kerschke, Pascal and Trautmann, Heike}},
  issn         = {{1568-4946}},
  journal      = {{Applied Soft Computing}},
  keywords     = {{Algorithm selection, Multi-objective optimization, Performance measurement, Combinatorial optimization, Traveling Salesperson Problem}},
  pages        = {{105901}},
  title        = {{{A multi-objective perspective on performance assessment and automated selection of single-objective optimization algorithms}}},
  doi          = {{10.1016/j.asoc.2019.105901}},
  volume       = {{88}},
  year         = {{2020}},
  internal-note = {{Duplicate of entry 48848 (same article, same DOI); consider merging under a single citation key.}},
}

