@article{19973, abstract = {{As a result of lightweight design, increased use is being made of high-strength steel and aluminium in car bodies. Self-piercing riveting is an established technique for joining these materials. The dissimilar properties of the two materials have led to a number of different rivet geometries in the past. Each rivet geometry fulfils the requirements of the materials within a limited range. In the present investigation, an improved rivet geometry is developed, which permits the reliable joining of two material combinations that until now could only be joined using two different rivet geometries. Material combination 1 consists of high-strength steel on both sides, while material combination 2 comprises aluminium on the punch side and high-strength steel on the die side. The material flow and the stress and strain conditions prevailing during the joining process are analysed by means of numerical simulation. The rivet geometry is then improved step by step on the basis of this analysis. Finally, the improved rivet geometry is manufactured and the findings of the investigation are verified in experimental joining tests.}}, author = {{Uhe, Benedikt and Kuball, Clara-Maria and Merklein, Marion and Meschut, Gerson}}, journal = {{Production Engineering}}, keywords = {{Self-piercing riveting, Joining technology, Rivet geometry, Multi-material design, High-strength steel, Aluminium}}, pages = {{417--423}}, title = {{{Improvement of a rivet geometry for the self-piercing riveting of high-strength steel and multi-material joints}}}, doi = {{10.1007/s11740-020-00973-w}}, volume = {{14}}, year = {{2020}}, }

@article{46334, abstract = {{We build upon a recently proposed multi-objective view on the performance measurement of single-objective stochastic solvers. The trade-off between the fraction of failed runs and the mean runtime of successful runs – both to be minimized – is analyzed directly, based on a study on algorithm selection of inexact state-of-the-art solvers for the well-known Traveling Salesperson Problem (TSP). Moreover, we adopt the hypervolume indicator (HV), commonly used in multi-objective optimization, for simultaneously assessing both conflicting objectives, and investigate its relations to commonly used performance indicators, both theoretically and empirically. Next to Penalized Average Runtime (PAR) and Penalized Quantile Runtime (PQR), the HV measure is used as a core concept in the construction of per-instance algorithm selection models, offering interesting insights into the complementary behavior of inexact TSP solvers.}}, author = {{Bossek, Jakob and Kerschke, Pascal and Trautmann, Heike}}, issn = {{1568-4946}}, journal = {{Applied Soft Computing}}, keywords = {{Algorithm selection, Multi-objective optimization, Performance measurement, Combinatorial optimization, Traveling Salesperson Problem}}, pages = {{105901}}, title = {{{A multi-objective perspective on performance assessment and automated selection of single-objective optimization algorithms}}}, doi = {{10.1016/j.asoc.2019.105901}}, volume = {{88}}, year = {{2020}}, }

@inproceedings{48847, abstract = {{Dynamic optimization problems have gained significant attention in evolutionary computation, as evolutionary algorithms (EAs) can easily adapt to changing environments. We show that EAs can solve the graph coloring problem for bipartite graphs more efficiently by using dynamic optimization.
In our approach, the graph instance is given incrementally such that the EA can reoptimize its coloring when a new edge introduces a conflict. We show that, when edges are inserted in a way that preserves graph connectivity, Randomized Local Search (RLS) efficiently finds a proper 2-coloring for all bipartite graphs. This includes graphs for which RLS and other EAs need exponential expected time in a static optimization scenario. We investigate different ways of building up the graph by popular graph traversals such as breadth-first search and depth-first search and analyze the resulting runtime behavior. We further show that offspring populations (e.g., a (1 + {$\lambda$}) RLS) lead to an exponential speedup in {$\lambda$}. Finally, an island model using three islands succeeds in an optimal time of {$\Theta$}(m) on every m-edge bipartite graph, outperforming offspring populations. This is the first example where an island model guarantees a speedup that is not bounded in the number of islands.}}, author = {{Bossek, Jakob and Neumann, Frank and Peng, Pan and Sudholt, Dirk}}, booktitle = {{Proceedings of the Genetic and Evolutionary Computation Conference}}, isbn = {{978-1-4503-7128-5}}, keywords = {{dynamic optimization, evolutionary algorithms, running time analysis, theory}}, pages = {{1277--1285}}, publisher = {{Association for Computing Machinery}}, title = {{{More Effective Randomized Search Heuristics for Graph Coloring through Dynamic Optimization}}}, doi = {{10.1145/3377930.3390174}}, year = {{2020}}, }

@inproceedings{48849, abstract = {{One-shot optimization tasks require the set of solution candidates to be determined prior to their evaluation, i.e., without the possibility of adaptive sampling. We consider two variants: classic one-shot optimization (where the aim is to find at least one solution of high quality) and one-shot regression (where the goal is to fit a model that resembles the true problem as closely as possible). For both tasks it seems intuitive that well-distributed samples should perform better than uniform or grid-based samples, since they provide better coverage of the decision space. In practice, quasi-random designs such as Latin Hypercube Samples and low-discrepancy point sets are indeed very commonly used for one-shot optimization tasks. In this work, we study how well low star discrepancy correlates with performance in one-shot optimization. Our results confirm an advantage of low-discrepancy designs, but also indicate that the correlation between discrepancy values and overall performance is rather weak. We then demonstrate that commonly used designs may be far from optimal. More precisely, we evolve 24 very specific designs that each achieve good performance on one of our benchmark problems. Interestingly, we find that these specifically designed samples yield surprisingly good performance across the whole benchmark set.
Our results therefore give a strong indication that significant performance gains over state-of-the-art one-shot sampling techniques are possible, and that evolutionary algorithms can be an efficient means of evolving such designs.}}, author = {{Bossek, Jakob and Doerr, Carola and Kerschke, Pascal and Neumann, Aneta and Neumann, Frank}}, booktitle = {{Parallel Problem Solving from Nature (PPSN XVI)}}, isbn = {{978-3-030-58111-4}}, keywords = {{Continuous optimization, Fully parallel search, One-shot optimization, Regression, Surrogate-assisted optimization}}, pages = {{111--124}}, publisher = {{Springer-Verlag}}, title = {{{Evolving Sampling Strategies for One-Shot Optimization Tasks}}}, doi = {{10.1007/978-3-030-58112-1_8}}, year = {{2020}}, }

@inproceedings{48851, abstract = {{Several important optimization problems in the area of vehicle routing can be seen as variants of the classical Traveling Salesperson Problem (TSP). In the area of evolutionary computation, the Traveling Thief Problem (TTP) has gained increasing interest over the last five years. In this paper, we investigate the effect of weights on such problems, in the sense that the cost of traveling increases with respect to the weights of nodes already visited during a tour. This provides an abstraction of important TSP variants such as the Traveling Thief Problem and time-dependent TSPs, and allows us to study precisely the increase in difficulty caused by weight dependence. We provide a 3.59-approximation for this weight-dependent version of the TSP with metric distances and bounded positive weights. Furthermore, we conduct experimental investigations of simple randomized local search with classical mutation operators and of two variants of the state-of-the-art evolutionary algorithm EAX adapted to the weighted TSP. Our results show the impact of the node weights on the position of the nodes in the resulting tour.}}, author = {{Bossek, Jakob and Casel, Katrin and Kerschke, Pascal and Neumann, Frank}}, booktitle = {{Proceedings of the Genetic and Evolutionary Computation Conference}}, isbn = {{978-1-4503-7128-5}}, keywords = {{dynamic optimization, evolutionary algorithms, running time analysis, theory}}, pages = {{1286--1294}}, publisher = {{Association for Computing Machinery}}, title = {{{The Node Weight Dependent Traveling Salesperson Problem: Approximation Algorithms and Randomized Search Heuristics}}}, doi = {{10.1145/3377930.3390243}}, year = {{2020}}, }

@inproceedings{48845, abstract = {{In practice, e.g., in delivery and service scenarios, vehicle routing problems (VRPs) often involve repeated decision making on dynamic customer requests. As in classical VRPs, tours have to be kept short while, at the same time, the number of serviced customers has to be maximized, resulting in a multi-objective problem. Beyond that, dynamic requests make it necessary to re-plan the not-yet-realized parts of a tour, while already realized tour parts are irreversible. In this paper, we study this type of bi-objective dynamic VRP, including sequential decision making and concurrent realization of decisions. We adopt a recently proposed Dynamic Evolutionary Multi-Objective Algorithm (DEMOA) for a related VRP and extend it to the more realistic scenario of multiple vehicles considered here.
We empirically show that our DEMOA is competitive both with an offline, clairvoyant multi-vehicle variant of the proposed DEMOA and with the dynamic single-vehicle approach proposed earlier.}}, author = {{Bossek, Jakob and Grimme, Christian and Trautmann, Heike}}, booktitle = {{Proceedings of the Genetic and Evolutionary Computation Conference}}, isbn = {{978-1-4503-7128-5}}, keywords = {{decision making, dynamic optimization, evolutionary algorithms, multi-objective optimization, vehicle routing}}, pages = {{166--174}}, publisher = {{Association for Computing Machinery}}, title = {{{Dynamic Bi-Objective Routing of Multiple Vehicles}}}, doi = {{10.1145/3377930.3390146}}, year = {{2020}}, }

@inproceedings{48850, abstract = {{Sequential model-based optimization (SMBO) approaches are algorithms for solving problems that require computationally or otherwise expensive function evaluations. The key design principle of SMBO is the substitution of the true objective function by a surrogate, which is used to propose the point(s) to be evaluated next. SMBO algorithms are intrinsically modular, leaving the user with many important design choices. Significant research efforts go into understanding which settings perform best for which type of problems. Most works, however, focus on the choice of the model, the acquisition function, and the strategy used to optimize the latter. The choice of the initial sampling strategy receives much less attention, and, not surprisingly, quite divergent recommendations can be found in the literature. In this work, we analyze how the size and the distribution of the initial sample influence the overall quality of the efficient global optimization (EGO) algorithm, a well-known SMBO approach. While, overall, small initial budgets using Halton sampling seem preferable, we also observe that the performance landscape is rather unstructured. We furthermore identify several situations in which EGO performs unfavorably compared to random sampling. Both observations indicate that an adaptive SMBO design could be beneficial, making SMBO an interesting test-bed for automated algorithm design.}}, author = {{Bossek, Jakob and Doerr, Carola and Kerschke, Pascal}}, booktitle = {{Proceedings of the Genetic and Evolutionary Computation Conference}}, isbn = {{978-1-4503-7128-5}}, keywords = {{continuous black-box optimization, design of experiments, initial design, sequential model-based optimization}}, pages = {{778--786}}, publisher = {{Association for Computing Machinery}}, title = {{{Initial Design Strategies and Their Effects on Sequential Model-Based Optimization: An Exploratory Case Study Based on BBOB}}}, doi = {{10.1145/3377930.3390155}}, year = {{2020}}, }
@article{4562, abstract = {{Employing main and sector-specific investment-grade CDS indices from the North American and European CDS markets and performing mean-variance out-of-sample analyses for conservative and aggressive investors over the period from 2006 to 2014, this paper analyzes the portfolio benefits of adding corporate CDS indices to a traditional financial portfolio consisting of stock and sovereign bond indices. As a baseline result, we find an increase in portfolio (downside) risk-diversification when adding CDS indices, which is observed irrespective of the CDS market, investor type and sub-period, including the global financial crisis and the European sovereign debt crisis. In addition, the analysis reveals higher portfolio excess returns and performance for CDS index portfolios; however, these effects clearly differ between markets, investor types and sub-periods. Overall, the portfolio benefits of adding CDS indices mainly result from the fact that institutional investors replace sovereign bond indices, rather than stock indices, with CDS indices due to their better risk-return characteristics. Our baseline findings hold under a variety of robustness checks. Results from sensitivity analyses provide further important implications for institutional investors with a strategic focus on long-term conservative portfolio management.}}, author = {{Hippert, Benjamin and Uhde, André and Wengerek, Sascha Tobias}}, journal = {{Review of Derivatives Research}}, keywords = {{Corporate credit default swap indices, Mean-variance asset allocation, Out-of-sample portfolio optimization, Portfolio risk-diversification, Portfolio performance evaluation}}, number = {{2}}, pages = {{203--259}}, title = {{{Portfolio Benefits of Adding Corporate Credit Default Swap Indices: Evidence from North America and Europe}}}, doi = {{10.1007/s11147-018-9148-8}}, volume = {{22}}, year = {{2019}}, }

@phdthesis{10000, abstract = {{Ultrasound is used to increase efficiency in process engineering. The operating parameters of the ultrasonic systems are determined empirically, since at present neither a systematic analysis of the interaction between ultrasonic transducer and sound field nor a method for measuring the cavitation activity without an additional sensor exists.
Based on an experimental analysis of the sonochemical reactor under consideration, a finite element model is built that takes the interaction between sound field and ultrasonic transducer into account. The model-based analysis shows that, owing to the acoustic properties of the autoclave, cavitation arises only directly at the sonotrode. The interaction between ultrasonic transducer and sound field allows conclusions about the sound field and the cavitation activity to be drawn from their feedback on the ultrasonic transducer. The linear sound pressure distribution makes it possible to predict the distribution of cavitation zones. The model described provides valuable insights for the design, analysis and scaling of sonochemical reactors. Owing to the harsh process conditions, applying sensors for monitoring the cavitation activity is not possible in many sonochemical processes. For in-process measurement of the cavitation activity, a method is developed that allows the cavitation activity to be assessed by evaluating its feedback on the ultrasonic transducer. This measurement method enables cavitation-based processes to be carried out in a predictable and reproducible manner and constitutes an important extension for existing and new ultrasonic systems.}}, author = {{Bornmann, Peter}}, keywords = {{Sonochemistry, Acoustic cavitation, Cavitation measurement, Cavitation detection, FEM simulation of ultrasonic transducers, Process monitoring, FEM simulation of sound fields, Self-sensing, Piezoelectric ultrasonic transducers, Ultrasonic cleaning}}, publisher = {{Shaker}}, title = {{{Modellierung und experimentelle Charakterisierung der Wechselwirkung zwischen Ultraschallwandler und Flüssigkeit in kavitationsbasierten Prozessen}}}, year = {{2019}}, }