@article{51208, abstract = {{Approximation of subdifferentials is one of the main tasks when computing descent directions for nonsmooth optimization problems. In this article, we propose a bisection method for weakly lower semismooth functions which is able to compute new subgradients that improve a given approximation in case a direction with insufficient descent was computed. Combined with a recently proposed deterministic gradient sampling approach, this yields a deterministic and provably convergent way to approximate subdifferentials for computing descent directions.}}, author = {{Gebken, Bennet}}, issn = {{0926-6003}}, journal = {{Computational Optimization and Applications}}, keywords = {{Applied Mathematics, Computational Mathematics, Control and Optimization}}, publisher = {{Springer Science and Business Media LLC}}, title = {{{A note on the convergence of deterministic gradient sampling in nonsmooth optimization}}}, doi = {{10.1007/s10589-024-00552-0}}, year = {{2024}}, } @article{51518, abstract = {{In applications of piezoelectric actuators and sensors, the dependability and particularly the reliability throughout their lifetime are vital to manufacturers and end-users and are enabled through condition-monitoring approaches. Existing approaches often utilize impedance measurements over a range of frequencies or velocity measurements and require additional equipment or sensors, such as a laser Doppler vibrometer. Furthermore, the non-negligible effects of varying operating conditions are often unconsidered. To minimize the need for additional sensors while maintaining the dependability of piezoelectric bending actuators irrespective of varying operating conditions, an online diagnostics approach is proposed. To this end, time- and frequency-domain features are extracted from monitored current signals to reflect hairline crack development in bending actuators. 
For validation of applicability, the presented analysis method was evaluated on piezoelectric bending actuators subjected to accelerated lifetime tests at varying voltage amplitudes and under external damping conditions. In the presence of a crack and due to a diminished stiffness, the resonance frequency decreases and the root-mean-square amplitude of the current signal simultaneously abruptly drops during the lifetime tests. Furthermore, the piezoelectric crack surfaces clapping is reflected in higher harmonics of the current signal. Thus, time-domain features and harmonics of the current signals are sufficient to diagnose hairline cracks in the actuators.}}, author = {{Aimiyekagbon, Osarenren Kennedy and Bender, Amelie and Hemsel, Tobias and Sextro, Walter}}, issn = {{2079-9292}}, journal = {{Electronics}}, keywords = {{piezoelectric transducer, self-sensing, fault detection, diagnostics, hairline crack, condition monitoring}}, number = {{3}}, publisher = {{MDPI AG}}, title = {{{Diagnostics of Piezoelectric Bending Actuators Subjected to Varying Operating Conditions}}}, doi = {{10.3390/electronics13030521}}, volume = {{13}}, year = {{2024}}, } @article{52726, abstract = {{Heteroclinic structures organize global features of dynamical systems. We analyse whether heteroclinic structures can arise in network dynamics with higher-order interactions which describe the nonlinear interactions between three or more units. We find that while commonly analysed model equations such as network dynamics on undirected hypergraphs may be useful to describe local dynamics such as cluster synchronization, they give rise to obstructions that allow to design of heteroclinic structures in phase space. 
By contrast, directed hypergraphs break the homogeneity and lead to vector fields that support heteroclinic structures.}}, author = {{Bick, Christian and von der Gracht, Sören}}, issn = {{2051-1329}}, journal = {{Journal of Complex Networks}}, keywords = {{Applied Mathematics, Computational Mathematics, Control and Optimization, Management Science and Operations Research, Computer Networks and Communications}}, number = {{2}}, publisher = {{Oxford University Press (OUP)}}, title = {{{Heteroclinic dynamics in network dynamical systems with higher-order interactions}}}, doi = {{10.1093/comnet/cnae009}}, volume = {{12}}, year = {{2024}}, } @inproceedings{47522, abstract = {{Artificial benchmark functions are commonly used in optimization research because of their ability to rapidly evaluate potential solutions, making them a preferred substitute for real-world problems. However, these benchmark functions have faced criticism for their limited resemblance to real-world problems. In response, recent research has focused on automatically generating new benchmark functions for areas where established test suites are inadequate. These approaches have limitations, such as the difficulty of generating new benchmark functions that exhibit exploratory landscape analysis (ELA) features beyond those of existing benchmarks.The objective of this work is to develop a method for generating benchmark functions for single-objective continuous optimization with user-specified structural properties. Specifically, we aim to demonstrate a proof of concept for a method that uses an ELA feature vector to specify these properties in advance. To achieve this, we begin by generating a random sample of decision space variables and objective values. We then adjust the objective values using CMA-ES until the corresponding features of our new problem match the predefined ELA features within a specified threshold. 
By iteratively transforming the landscape in this way, we ensure that the resulting function exhibits the desired properties. To create the final function, we use the resulting point cloud as training data for a simple neural network that produces a function exhibiting the target ELA features. We demonstrate the effectiveness of this approach by replicating the existing functions of the well-known BBOB suite and creating new functions with ELA feature values that are not present in BBOB.}}, author = {{Prager, Raphael Patrick and Dietrich, Konstantin and Schneider, Lennart and Schäpermeier, Lennart and Bischl, Bernd and Kerschke, Pascal and Trautmann, Heike and Mersmann, Olaf}}, booktitle = {{Proceedings of the 17th ACM/SIGEVO Conference on Foundations of Genetic Algorithms}}, isbn = {{9798400702020}}, keywords = {{Benchmarking, Instance Generator, Black-Box Continuous Optimization, Exploratory Landscape Analysis, Neural Networks}}, pages = {{129–139}}, publisher = {{Association for Computing Machinery}}, title = {{{Neural Networks as Black-Box Benchmark Functions Optimized for Exploratory Landscape Features}}}, doi = {{10.1145/3594805.3607136}}, year = {{2023}}, } @inproceedings{48869, abstract = {{Evolutionary algorithms have been shown to obtain good solutions for complex optimization problems in static and dynamic environments. It is important to understand the behaviour of evolutionary algorithms for complex optimization problems that also involve dynamic and/or stochastic components in a systematic way in order to further increase their applicability to real-world problems. We investigate the node weighted traveling salesperson problem (W-TSP), which provides an abstraction of a wide range of weighted TSP problems, in dynamic settings. In the dynamic setting of the problem, items that have to be collected as part of a TSP tour change over time. 
We first present a dynamic setup for the dynamic W-TSP parameterized by different types of changes that are applied to the set of items to be collected when traversing the tour. Our first experimental investigations study the impact of such changes on resulting optimized tours in order to provide structural insights of optimization solutions. Afterwards, we investigate simple mutation-based evolutionary algorithms and study the impact of the mutation operators and the use of populations with dealing with the dynamic changes to the node weights of the problem.}}, author = {{Bossek, Jakob and Neumann, Aneta and Neumann, Frank}}, booktitle = {{Proceedings of the Genetic and Evolutionary Computation Conference}}, isbn = {{9798400701191}}, keywords = {{dynamic optimization, evolutionary algorithms, re-optimization, weighted traveling salesperson problem}}, pages = {{248–256}}, publisher = {{Association for Computing Machinery}}, title = {{{On the Impact of Basic Mutation Operators and Populations within Evolutionary Algorithms for the Dynamic Weighted Traveling Salesperson Problem}}}, doi = {{10.1145/3583131.3590384}}, year = {{2023}}, } @inbook{30289, abstract = {{This chapter presents a discussion of the concept of agency. Agency is understood as a multifaceted construct describing the idea that human beings make choices, act on these choices, and thereby exercise influence on their own lives as well as their environment. We argue that the concept is discussed from three different perspectives in the literature—transformational, dispositional, and relational—that are each related to learning and development in work contexts. These perspectives do not reflect incompatible positions but rather different aspects of the same phenomena. 
The chapter also offers an avenue of insight into empirical studies that employ agency as a central concept as well as discussions about concepts that closely overlap with ideas of human beings as agents of power and influence.}}, author = {{Goller, Michael and Paloniemi, Susanna}}, booktitle = {{Research Approaches on Workplace Learning}}, isbn = {{9783030895815}}, issn = {{2210-5549}}, keywords = {{Agency, Workplace learning, Professional development, Proactivity, Self-direction}}, publisher = {{Springer International Publishing}}, title = {{{Agency: Taking Stock of Workplace Learning Research}}}, doi = {{10.1007/978-3-030-89582-2_1}}, year = {{2022}}, } @inbook{30290, abstract = {{The article explores the particular quality of changes introduced through the latest wave of digital transformation of workplaces. It has effects on workflow processes, on distribution of work and tasks, and the mode of distributing working tasks, e.g. through cyber-physical systems. Hence, the changes in work are manifold and require changes in vocational education and training as well as in workplace learning. These changes reveal new challenges for research on workplace learning. Finally, conclusions for future workplace learning research will be developed.}}, author = {{Harteis, Christian}}, booktitle = {{Research Approaches on Workplace Learning}}, isbn = {{9783030895815}}, issn = {{2210-5549}}, keywords = {{Digitalisation, Self organisation, Distribution of labour, Automation}}, publisher = {{Springer International Publishing}}, title = {{{Research on Workplace Learning in Times of Digitalisation}}}, doi = {{10.1007/978-3-030-89582-2_19}}, year = {{2022}}, } @article{30861, abstract = {{We consider the problem of maximization of metabolite production in bacterial cells formulated as a dynamical optimal control problem (DOCP). 
According to Pontryagin’s maximum principle, optimal solutions are concatenations of singular and bang arcs and exhibit the chattering or Fuller phenomenon, which is problematic for applications. To avoid chattering, we introduce a reduced model which is still biologically relevant and retains the important structural features of the original problem. Using a combination of analytical and numerical methods, we show that the singular arc is dominant in the studied DOCPs and exhibits the turnpike property. This property is further used in order to design simple and realistic suboptimal control strategies.}}, author = {{Caillau, Jean-Baptiste and Djema, Walid and Gouzé, Jean-Luc and Maslovskaya, Sofya and Pomet, Jean-Baptiste}}, issn = {{0022-3239}}, journal = {{Journal of Optimization Theory and Applications}}, keywords = {{Applied Mathematics, Management Science and Operations Research, Control and Optimization}}, publisher = {{Springer Science and Business Media LLC}}, title = {{{Turnpike Property in Optimal Microbial Metabolite Production}}}, doi = {{10.1007/s10957-022-02023-0}}, year = {{2022}}, } @inproceedings{29842, abstract = {{To build successful software products, developers continuously have to discover what features the users really need. This discovery can be achieved with continuous experimentation, testing different software variants with distinct user groups, and deploying the superior variant for all users. However, existing approaches do not focus on explicit modeling of variants and experiments, which offers advantages such as traceability of decisions and combinability of experiments. Therefore, our vision is the provision of model-driven continuous experimentation, which provides the developer with a framework for structuring the experimentation process. For that, we introduce the overall concept, apply it to the experimentation on component-based software architectures and point out future research questions. 
In particular, we show the applicability by combining feature models for modeling the software variants, users, and experiments (i.e., model-driven) with MAPE-K for the adaptation (i.e., continuous experimentation) and implementing the concept based on the component-based Angular framework.}}, author = {{Gottschalk, Sebastian and Yigitbas, Enes and Engels, Gregor}}, booktitle = {{Proceedings of the 18th International Conference on Software Architecture Companion}}, keywords = {{continuous experimentation, model-driven, component-based software architectures, self-adaptation}}, location = {{Hawaii}}, publisher = {{IEEE}}, title = {{{Model-driven Continuous Experimentation on Component-based Software Architectures}}}, doi = {{10.1109/ICSA-C54293.2022.00011}}, year = {{2022}}, } @article{34614, abstract = {{Mit steigenden Optimierungsanforderungen an das Individuum wächst auch das individuelle Bedürfnis nach Kontrolle. Dieses kann u. a. durch self tracking-Technologien erfüllt werden. Anhand von drei Fallbeispielen – der Personenwaage, dem Wearable und dem habit tracker – zeigt dieser Aufsatz, wie sich medienbasierte Selbsttechnologien im historischen Verlauf intensiviert und stärker in den Alltag integriert haben. Ein besonderer Fokus liegt dabei auf der Ambivalenz dieser Medien: Ermöglichen sie auf der einen Seite zwar eine Selbstkontrolle und stellen so potenziell stabilisierende Ressourcen für das Individuum dar, schaffen sie auf der anderen Seite auch neue Anforderungen, die es zu erfüllen gilt.}}, author = {{Schloots, Franziska Margarete}}, journal = {{ffk Journal}}, keywords = {{self-tracking, Selbsttechnologien, Wearable, Bullet Journal, Personenwaage, Selbstvermessung}}, number = {{7}}, pages = {{74--91}}, title = {{{‚Understand what’s happening within‘. 
Selbstkontrolle mit Personenwaage, Wearable und habit tracker}}}, doi = {{10.25969/MEDIAREP/18238}}, volume = {{6}}, year = {{2022}}, } @article{35136, abstract = {{Im Zentrum dieses Beitrags stehen Ergebnisse der Messung pädagogischer Kompetenzen Studierender der Theologie, die das Praxissemester in Deutschland absolviert haben. Das bildungswissenschaftliche Wissen, Kompetenzselbsteinschätzungen und ihre Entwicklung sowie die Einschätzung der im Praxissemester erreichten Ziele Studierender werden dabei unter Berücksichtigung der Ausrichtung des Lehramtsstudiums auf eine Schulform betrachtet. Um die Ergebnisse der Messung bildungswissenschaftlichen Wissens und die der Messung von Kompetenzselbsteinschätzungen zu kontextualisieren (N = 304), wird zuerst die Relevanz des (bildungswissenschaftlichen) Wissens als Ausgangspunkt des Könnens herausgearbeitet. Daran anschließend werden Befunde zur schulformspezifischen Professionalisierung resümiert. Anschließend werden Hypothesen hergeleitet, die Anlage der Studie sowie die Testinstrumente vorgestellt, die Ergebnisse präsentiert und diskutiert. Die Ergebnisse zeigen wider Erwarten, dass sich weder das bildungswissenschaftliche Wissen, die Kompetenzselbsteinschätzungen und ihre Entwicklung noch die Einschätzung der im Praxissemester erreichten Ziele angehender Lehrkräfte in Abhängigkeit der Schulformen unterscheiden. Die Diskussion bezieht sich u.a. auf die Struktur der Lehramtsstudiengänge, die Denkfiguren zur Entwicklung von Können und die Konzeption der Messinstrumente. 
}}, author = {{Caruso, Carina and Seifert, Andreas}}, issn = {{1018-1539}}, journal = {{Österreichische Religionspädagogische Forum}}, keywords = {{Bildungswissenschaftliches Wissen, Kompetenzmessung, Kompetenzselbsteinschätzung, Praxissemester, Professionalisierung / competence measurement, competence self-assessment, educational knowledge, internship, professionalization}}, number = {{1}}, pages = {{239--260}}, publisher = {{Universitätsbibliothek Graz}}, title = {{{Inwiefern ist die Professionalisierung in Praxisphasen schulformspezifisch?}}}, doi = {{10.25364/10.30:2022.1.14}}, volume = {{30}}, year = {{2022}}, } @article{35137, abstract = {{Im Zentrum dieses Beitrags stehen Ergebnisse der Messung pädagogischer Kompetenzen Studierender. Dabei werden sowohl das bildungswissenschaftliche Wissen als auch die Entwicklung der Kompetenzselbsteinschätzungen in den Bereichen Unterrichten, Erziehen, Beurteilen und Innovieren unter Berücksichtigung individueller Voraussetzungen (Alter, Geschlecht, Abiturnote, Bachelornote, Konfession) betrachtet. Um die Ergebnisse hinsichtlich ihrer Bedeutung für die Professionalisierung angehender Lehrkräfte diskutieren zu können, wird, den empirischen Erkenntnissen voranstehend, die Bedeutung von Wissen für berufliches Können herausgearbeitet. Daran anschließend werden Hypothesen hergeleitet, die Anlage der Studie sowie die Testinstrumente vorgestellt, die Ergebnisse präsentiert und diskutiert. Die Ergebnisse zeigen, dass die Abitur- und Bachelornote die Varianz hinsichtlich des pädagogischen Wissens aufklären, sich eine signifikante Entwicklung der Kompetenzselbsteinschätzungen angehender Lehrkräfte feststellen lässt, aber sich angehende Religionslehrkräfte kaum von anderen Studierenden unterscheiden. Die Diskussion nimmt u. a. Rückbezug auf die Denkfiguren zur Entwicklung beruflichen Könnens und benennt Limitationen, die mit der Studie und Kompetenzmessungen verbunden sind. Daran schließt die Formulierung eines Ausblicks an. 
Der Beitrag zielt insbesondere darauf, repräsentative Ergebnisse der Kompetenzmessung zu präsentieren und dabei potenzielle Einflussfaktoren auf die studentische Kompetenzentwicklung zu beleuchten. Ein dadurch angereichertes Konglomerat belastbarer Erkenntnisse zielt darauf, langfristig zur Ableitung lehrerbildungsdidaktischer Überlegungen herangezogen werden zu können, die die studentische Professionalisierung unterstützen.}}, author = {{Caruso, Carina and Seifert, Andreas}}, issn = {{2750-3941}}, journal = {{Religionspädagogische Beiträge. Journal for Religion in Education}}, keywords = {{Bildungswissenschaftliches Wissen, Kompetenzmessung, Kompetenzselbsteinschätzung, Praxissemester, Professionalisierung / competence measurement, competence self-assessment, educational knowledge, internship, professionalization}}, number = {{1}}, pages = {{3--15}}, publisher = {{University of Bamberg Press}}, title = {{{Pädagogische Kompetenz als Ausgangspunkt beruflichen Könnens!? Ergebnisse der Kompetenzmessung angehender Lehrkräfte unter Berücksichtigung individueller Voraussetzungen}}}, doi = {{10.20377/rpb-101}}, volume = {{45}}, year = {{2022}}, } @article{35206, author = {{Bonnard, Bernard and Rouot, Jérémy and Wembe Moafo, Boris Edgar}}, issn = {{2156-8472}}, journal = {{Mathematical Control and Related Fields}}, keywords = {{Applied Mathematics, Control and Optimization, General Medicine}}, pages = {{0--0}}, publisher = {{American Institute of Mathematical Sciences (AIMS)}}, title = {{{Accessibility properties of abnormal geodesics in optimal control illustrated by two case studies}}}, doi = {{10.3934/mcrf.2022052}}, year = {{2022}}, } @inproceedings{40046, abstract = {{Theoretical approaches to the transformation towards an inclusive educational system in Germany mostly agree on the involvement of developmental tasks in subject related research (Hinz, 2011). 
The common understanding of inclusion as a process geared towards equal participation of all children (Booth, 2012) requires a reflexive questioning of established values, attitudes and practices in order to develop inclusive subject related research, teacher training and teaching and learning (Pech & Schomaker, 2013). Among other things, this results in consequences for the design of pre-service teacher training. To a large extent, teacher education is driven by the promotion of central competencies, interests and self-efficacy (Baumert & Kunter, 2011). It aims towards the development and realisation of inclusive interdisciplinary science and social studies (‘Sachunterricht’) in primary education (Moser, 2018). In conjunction with largely acknowledged constructivist approaches to teaching and learning (Möller, 2001), the development of personality, the consideration of basic needs (Deci & Ryan, 1993) and promotion of individual potentials are repeatedly fundamentally represented in subject related and pedagogical considerations (Feuser, 1989; GDSU, 2013). Therefore, the aforementioned constructivist approach is connected to several certain key paradigms for teaching and learning processes (e.g., Vygotskij, 1978; Posner et al., 1982; van de Pol et al., 2010). In this regard, the nature of primary school students’ basic needs have empirically not been sufficiently studied yet. Theoretical frameworks from motivational psychology (Deci & Ryan, 1993) do not explicitly address how individual needs differ and how the diversity of needs can be included in joint-learning, multi-perspective technology education classes. The research project the present paper is part of aims to develop a research-based concept for the professionalisation of pre-service teachers in a seminar course. 
Therefore, the promotion of the pre-service teachers’ interests and self-efficacy expectations have been assessed in a pre-post research design with a control group visiting another course not related to technology education and inclusion. The present paper describes and discusses first results of the project and will give an outlook on subsequent developmental tasks.}}, author = {{Schröer, Franz and Tenberge, Claudia}}, booktitle = {{PATT39 - PATT on the Edge Technology, Innovation and Education}}, editor = {{Gill, David and Tuff, Jim and Kennedy, Thomas and Pendergast, Shawn and Jamil, Sana}}, keywords = {{Inclusion, basic needs, pre-service teacher training, interest, self-efficacy}}, location = {{St. John’s, Newfoundland and Labrador, Canada}}, pages = {{49--57}}, title = {{{How to enable pre-service teachers to design technological teaching and learning inclusively? – On the nature and consideration of basic needs in teacher training}}}, year = {{2022}}, } @article{36083, author = {{Constantiou, Ioanna and Mukkamala, Alivelu and Sjöklint, Mimmi and Trier, Matthias}}, issn = {{0960-085X}}, journal = {{European Journal of Information Systems}}, keywords = {{Library and Information Sciences, Information Systems, Self-Tracking, User Behaviour, Discontinuance}}, pages = {{1--21}}, publisher = {{Informa UK Limited}}, title = {{{Engaging with self-tracking applications: how do users respond to their performance data?}}}, doi = {{10.1080/0960085x.2022.2081096}}, year = {{2022}}, } @inproceedings{31066, abstract = {{While trade-offs between modeling effort and model accuracy remain a major concern with system identification, resorting to data-driven methods often leads to a complete disregard for physical plausibility. To address this issue, we propose a physics-guided hybrid approach for modeling non-autonomous systems under control. 
Starting from a traditional physics-based model, this is extended by a recurrent neural network and trained using a sophisticated multi-objective strategy yielding physically plausible models. While purely data-driven methods fail to produce satisfying results, experiments conducted on real data reveal substantial accuracy improvements by our approach compared to a physics-based model. }}, author = {{Schön, Oliver and Götte, Ricarda-Samantha and Timmermann, Julia}}, booktitle = {{14th IFAC Workshop on Adaptive and Learning Control Systems (ALCOS 2022)}}, keywords = {{neural networks, physics-guided, data-driven, multi-objective optimization, system identification, machine learning, dynamical systems}}, location = {{Casablanca, Morocco}}, number = {{12}}, pages = {{19--24}}, title = {{{Multi-Objective Physics-Guided Recurrent Neural Networks for Identifying Non-Autonomous Dynamical Systems}}}, doi = {{10.1016/j.ifacol.2022.07.282}}, volume = {{55}}, year = {{2022}}, } @article{32403, abstract = {{Due to failures or even the absence of an electricity grid, microgrid systems are becoming popular solutions for electrifying African rural communities. However, they are heavily stressed and complex to control due to their intermittency and demand growth. Demand side management (DSM) serves as an option to increase the level of flexibility on the demand side by scheduling users’ consumption patterns profiles in response to supply. This paper proposes a demand-side management strategy based on load shifting and peak clipping. The proposed approach was modelled in a MATLAB/Simulink R2021a environment and was optimized using the artificial neural network (ANN) algorithm. Simulations were carried out to test the model’s efficacy in a stand-alone PV-battery microgrid in East Africa. The proposed algorithm reduces the peak demand, smoothing the load profile to the desired level, and improves the system’s peak to average ratio (PAR). 
The presence of deferrable loads has been considered to bring more flexible demand-side management. Results promise decreases in peak demand and peak to average ratio of about 31.2% and 7.5% through peak clipping. In addition, load shifting promises more flexibility to customers.}}, author = {{Philipo, Godiana Hagile and Kakande, Josephine Nakato and Krauter, Stefan}}, issn = {{1996-1073}}, journal = {{Energies}}, keywords = {{Energy (miscellaneous), Energy Engineering and Power Technology, Renewable Energy, Sustainability and the Environment, Electrical and Electronic Engineering, Control and Optimization, Engineering (miscellaneous), Building and Construction}}, number = {{14}}, publisher = {{MDPI AG}}, title = {{{Neural Network-Based Demand-Side Management in a Stand-Alone Solar PV-Battery Microgrid Using Load-Shifting and Peak-Clipping}}}, doi = {{10.3390/en15145215}}, volume = {{15}}, year = {{2022}}, } @article{47961, abstract = {{Due to failures or even the absence of an electricity grid, microgrid systems are becoming popular solutions for electrifying African rural communities. However, they are heavily stressed and complex to control due to their intermittency and demand growth. Demand side management (DSM) serves as an option to increase the level of flexibility on the demand side by scheduling users’ consumption patterns profiles in response to supply. This paper proposes a demand-side management strategy based on load shifting and peak clipping. The proposed approach was modelled in a MATLAB/Simulink R2021a environment and was optimized using the artificial neural network (ANN) algorithm. Simulations were carried out to test the model’s efficacy in a stand-alone PV-battery microgrid in East Africa. The proposed algorithm reduces the peak demand, smoothing the load profile to the desired level, and improves the system’s peak to average ratio (PAR). The presence of deferrable loads has been considered to bring more flexible demand-side management. 
Results promise decreases in peak demand and peak to average ratio of about 31.2% and 7.5% through peak clipping. In addition, load shifting promises more flexibility to customers.}}, author = {{Philipo, Godiana Hagile and Kakande, Josephine Nakato and Krauter, Stefan}}, issn = {{1996-1073}}, journal = {{Energies}}, keywords = {{Energy (miscellaneous), Energy Engineering and Power Technology, Renewable Energy, Sustainability and the Environment, Electrical and Electronic Engineering, Control and Optimization, Engineering (miscellaneous), Building and Construction}}, number = {{14}}, publisher = {{MDPI AG}}, title = {{{Neural Network-Based Demand-Side Management in a Stand-Alone Solar PV-Battery Microgrid Using Load-Shifting and Peak-Clipping}}}, doi = {{10.3390/en15145215}}, volume = {{15}}, year = {{2022}}, } @inproceedings{29803, abstract = {{Ultrasonic wire bonding is a solid-state joining process used to form electrical interconnections in micro and power electronics and batteries. A high frequency oscillation causes a metallurgical bond deformation in the contact area. Due to the numerous physical influencing factors, it is very difficult to accurately capture this process in a model. Therefore, our goal is to determine a suitable feed-forward control strategy for the bonding process even without detailed model knowledge. We propose the use of batch constrained Bayesian optimization for the control design. Hence, Bayesian optimization is precisely adapted to the application of bonding: the constraint is used to check one quality feature of the process and the use of batches leads to more efficient experiments. Our approach is suitable to determine a feed-forward control for the bonding process that provides very high quality bonds without using a physical model. We also show that the quality of the Bayesian optimization based control outperforms random search as well as manual search by a user. 
Using a simple prior knowledge model derived from data further improves the quality of the connection. The Bayesian optimization approach offers the possibility to perform a sensitivity analysis of the control parameters, which allows to evaluate the influence of each control parameter on the bond quality. In summary, Bayesian optimization applied to the bonding process provides an excellent opportunity to develop a feedforward control without full modeling of the underlying physical processes.}}, author = {{Hesse, Michael and Hunstig, Matthias and Timmermann, Julia and Trächtler, Ansgar}}, booktitle = {{Proceedings of the 11th International Conference on Pattern Recognition Applications and Methods (ICPRAM)}}, isbn = {{978-989-758-549-4}}, keywords = {{Bayesian optimization, Wire bonding, Feed-forward control, model-free design}}, location = {{Online}}, pages = {{383--394}}, title = {{{Batch Constrained Bayesian Optimization for Ultrasonic Wire Bonding Feed-forward Control Design}}}, year = {{2022}}, } @inproceedings{48882, abstract = {{In multimodal multi-objective optimization (MMMOO), the focus is not solely on convergence in objective space, but rather also on explicitly ensuring diversity in decision space. We illustrate why commonly used diversity measures are not entirely appropriate for this task and propose a sophisticated basin-based evaluation (BBE) method. Also, BBE variants are developed, capturing the anytime behavior of algorithms. The set of BBE measures is tested by means of an algorithm configuration study. We show that these new measures also transfer properties of the well-established hypervolume (HV) indicator to the domain of MMMOO, thus also accounting for objective space convergence. Moreover, we advance MMMOO research by providing insights into the multimodal performance of the considered algorithms. 
Specifically, algorithms exploiting local structures are shown to outperform classical evolutionary multi-objective optimizers regarding the BBE variants and respective trade-off with HV.}}, author = {{Heins, Jonathan and Rook, Jeroen and Schäpermeier, Lennart and Kerschke, Pascal and Bossek, Jakob and Trautmann, Heike}}, booktitle = {{Parallel Problem Solving from Nature (PPSN XVII)}}, editor = {{Rudolph, Günter and Kononova, Anna V. and Aguirre, Hernán and Kerschke, Pascal and Ochoa, Gabriela and Tusar, Tea}}, isbn = {{978-3-031-14714-2}}, keywords = {{Anytime behavior, Benchmarking, Continuous optimization, Multi-objective optimization, Multimodality, Performance metric}}, pages = {{192–206}}, publisher = {{Springer International Publishing}}, title = {{{BBE: Basin-Based Evaluation of Multimodal Multi-objective Optimization Problems}}}, doi = {{10.1007/978-3-031-14714-2_14}}, year = {{2022}}, } @inproceedings{48896, abstract = {{Hardness of Multi-Objective (MO) continuous optimization problems results from an interplay of various problem characteristics, e. g. the degree of multi-modality. We present a benchmark study of classical and diversity focused optimizers on multi-modal MO problems based on automated algorithm configuration. 
We show the large effect of the latter and investigate the trade-off between convergence in objective space and diversity in decision space.}}, author = {{Rook, Jeroen and Trautmann, Heike and Bossek, Jakob and Grimme, Christian}}, booktitle = {{Proceedings of the Genetic and Evolutionary Computation Conference Companion}}, isbn = {{978-1-4503-9268-6}}, keywords = {{configuration, multi-modality, multi-objective optimization}}, pages = {{356–359}}, publisher = {{Association for Computing Machinery}}, title = {{{On the Potential of Automated Algorithm Configuration on Multi-Modal Multi-Objective Optimization Problems}}}, doi = {{10.1145/3520304.3528998}}, year = {{2022}}, } @article{21004, abstract = {{Automated machine learning (AutoML) supports the algorithmic construction and data-specific customization of machine learning pipelines, including the selection, combination, and parametrization of machine learning algorithms as main constituents. Generally speaking, AutoML approaches comprise two major components: a search space model and an optimizer for traversing the space. Recent approaches have shown impressive results in the realm of supervised learning, most notably (single-label) classification (SLC). Moreover, first attempts at extending these approaches towards multi-label classification (MLC) have been made. While the space of candidate pipelines is already huge in SLC, the complexity of the search space is raised to an even higher power in MLC. One may wonder, therefore, whether and to what extent optimizers established for SLC can scale to this increased complexity, and how they compare to each other. This paper makes the following contributions: First, we survey existing approaches to AutoML for MLC. Second, we augment these approaches with optimizers not previously tried for MLC. Third, we propose a benchmarking framework that supports a fair and systematic comparison. 
Fourth, we conduct an extensive experimental study, evaluating the methods on a suite of MLC problems. We find a grammar-based best-first search to compare favorably to other optimizers.}}, author = {{Wever, Marcel Dominik and Tornede, Alexander and Mohr, Felix and Hüllermeier, Eyke}}, issn = {{0162-8828}}, journal = {{IEEE Transactions on Pattern Analysis and Machine Intelligence}}, keywords = {{Automated Machine Learning, Multi Label Classification, Hierarchical Planning, Bayesian Optimization}}, pages = {{1--1}}, title = {{{AutoML for Multi-Label Classification: Overview and Empirical Evaluation}}}, doi = {{10.1109/tpami.2021.3051276}}, year = {{2021}}, } @article{21808, abstract = {{Modern services consist of interconnected components,e.g., microservices in a service mesh or machine learning functions in a pipeline. These services can scale and run across multiple network nodes on demand. To process incoming traffic, service components have to be instantiated and traffic assigned to these instances, taking capacities, changing demands, and Quality of Service (QoS) requirements into account. This challenge is usually solved with custom approaches designed by experts. While this typically works well for the considered scenario, the models often rely on unrealistic assumptions or on knowledge that is not available in practice (e.g., a priori knowledge). We propose DeepCoord, a novel deep reinforcement learning approach that learns how to best coordinate services and is geared towards realistic assumptions. It interacts with the network and relies on available, possibly delayed monitoring information. Rather than defining a complex model or an algorithm on how to achieve an objective, our model-free approach adapts to various objectives and traffic patterns. An agent is trained offline without expert knowledge and then applied online with minimal overhead. 
Compared to a state-of-the-art heuristic, DeepCoord significantly improves flow throughput (up to 76%) and overall network utility (more than 2x) on real-world network topologies and traffic traces. It also supports optimizing multiple, possibly competing objectives, learns to respect QoS requirements, generalizes to scenarios with unseen, stochastic traffic, and scales to large real-world networks. For reproducibility and reuse, our code is publicly available.}}, author = {{Schneider, Stefan Balthasar and Khalili, Ramin and Manzoor, Adnan and Qarawlus, Haydar and Schellenberg, Rafael and Karl, Holger and Hecker, Artur}}, journal = {{Transactions on Network and Service Management}}, keywords = {{network management, service management, coordination, reinforcement learning, self-learning, self-adaptation, multi-objective}}, publisher = {{IEEE}}, title = {{{Self-Learning Multi-Objective Service Coordination Using Deep Reinforcement Learning}}}, doi = {{10.1109/TNSM.2021.3076503}}, year = {{2021}}, } @techreport{33854, abstract = {{Macrodiversity is a key technique to increase the capacity of mobile networks. It can be realized using coordinated multipoint (CoMP), simultaneously connecting users to multiple overlapping cells. Selecting which users to serve by how many and which cells is NP-hard but needs to happen continuously in real time as users move and channel state changes. Existing approaches often require strict assumptions about or perfect knowledge of the underlying radio system, its resource allocation scheme, or user movements, none of which is readily available in practice. Instead, we propose three novel self-learning and self-adapting approaches using model-free deep reinforcement learning (DRL): DeepCoMP, DD-CoMP, and D3-CoMP. DeepCoMP leverages central observations and control of all users to select cells almost optimally. DD-CoMP and D3-CoMP use multi-agent DRL, which allows distributed, robust, and highly scalable coordination. 
All three approaches learn from experience and self-adapt to varying scenarios, reaching 2x higher Quality of Experience than other approaches. They have very few built-in assumptions and do not need prior system knowledge, making them more robust to change and better applicable in practice than existing approaches.}}, author = {{Schneider, Stefan Balthasar and Karl, Holger and Khalili, Ramin and Hecker, Artur}}, keywords = {{mobility management, coordinated multipoint, CoMP, cell selection, resource management, reinforcement learning, multi agent, MARL, self-learning, self-adaptation, QoE}}, title = {{{DeepCoMP: Coordinated Multipoint Using Multi-Agent Deep Reinforcement Learning}}}, year = {{2021}}, } @article{32558, abstract = {{With the rapid progress of technological development, self-efficacy in reference to digital devices (i.e., information and computer technology [ICT] self-efficacy) is an important driver that helps students to deal with technological problems and support their lifelong learning processes. Schools, peers, and home learning environments are important sources for the development of positive self-efficacy. Expanding on previous research, we investigated the associations between different aspects of the digital home learning environment and students’ ICT self-efficacy. The moderation effects of gender were also tested. A total of 651 children answered a questionnaire about different digital home learning environment dimensions and estimated their ICT self-efficacy using an adapted scale—Schwarzer and Jerusalem’s (1999) general self-efficacy scale. Using the structural equation modeling technique, a digital home learning environment containing six different qualities of parental support was investigated. Families’ cultural capital, parents’ attitudes toward the Internet, and shared Internet activities at home contributed positively to ICT self-efficacy. We observed small gender differences, with the moderation effect being nonsignificant. 
The results help researchers and practitioners to understand how different dimensions of the digital home learning environment support ICT self-efficacy. We will discuss how parents can enhance the home learning environment and how teachers can integrate this knowledge into formal education.}}, author = {{Bonanati, Sabrina and Buhl, Heike M.}}, issn = {{1387-1579}}, journal = {{Learning Environments Research}}, keywords = {{Digital media use, Gender, Home learning environment, ICT self-efficacy, Motivation, Parental involvement}}, number = {{2}}, pages = {{485--505}}, publisher = {{Springer Science and Business Media LLC}}, title = {{{The digital home learning environment and its relation to children’s ICT self-efficacy}}}, doi = {{10.1007/s10984-021-09377-8}}, volume = {{25}}, year = {{2021}}, } @techreport{37136, abstract = {{This study examines the relation between voluntary audit and the cost of debt in private firms. We use a sample of 4,058 small private firms operating in the period 2006‐2017 that are not subject to mandatory audits. Firms decide for a voluntary audit of financial statements either because the economic setting in which they operate effectively forces them to do so (e.g., ownership complexity, export‐oriented supply chain, subsidiary status) or because firm fundamentals and/or financial reporting practices limit their access to financial debt, both reflected in earnings quality. We use these factors to model the decision for voluntary audit. In the outcome analyses, we find robust evidence that voluntary audits are associated with higher, rather than lower, interest rate by up to 3.0 percentage points. This effect is present regardless of the perceived audit quality (Big‐4 vs. non‐Big‐4), but is stronger for non‐Big‐4 audits where auditees have a stronger position relative to auditors. Audited firms’ earnings are less informative about future operating performance relative to unaudited counterparts. 
We conclude that voluntary audits facilitate access to financial debt for firms with higher risk that may otherwise have no access to this form of financing. The price paid is reflected in higher interest rates charged to firms with voluntary audits – firms with higher information and/or fundamental risk.}}, author = {{Ichev, Riste and Koren, Jernej and Kosi, Urska and Sitar Sustar, Katarina and Valentincic, Aljosa}}, keywords = {{private firms, voluntary audit, cost of debt, self‐selection bias, risk}}, title = {{{Cost of Debt for Private Firms Revisited: Voluntary Audits as a Reflection of Risk}}}, year = {{2021}}, } @inbook{22930, abstract = {{Self-piercing riveting is an established technique for joining multi-material structures in car body manufacturing. Rivets for self-piercing riveting differ in their geometry, the material used, the condition of the material and their surface condition. To shorten the manufacturing process by omitting the heat treatment and the coating process, the authors have elaborated a concept for the use of stainless steel with high strain hardening as a rivet material. The focus of the present investigation is on the evaluation of the influences of the rivet’s geometry and material on its deformation behaviour. Conventional rivets of types P and HD2, a rivet with an improved geometry made of treatable steel 38B2, and rivets made of the stainless steels 1.3815 and 1.4541 are examined. The analysis is conducted by means of multi-step joining tests for two material combinations comprising high-strength steel HCT70X and aluminium EN AW-5083. The joints are cut to provide a cross-section and the deformation behaviour of the different rivets is analysed on the basis of the measured changes in geometry and hardness. In parallel, an examination of the force-stroke curves provides further insights. 
It can be demonstrated that, besides the geometry, the material strength, in particular, has a significant influence on the deformation behaviour of the rivet. The strength of steel 1.4541 is seen to be too low for the joining task, while the strength of steel 1.3815 is sufficient, and hence the investigation confirms the capability of rivets made of 1.3815 for joining even challenging material combinations.}}, author = {{Uhe, Benedikt and Kuball, Clara-Maria and Merklein, Marion and Meschut, Gerson}}, booktitle = {{Forming the Future - Proceedings of the 13th International Conference on the Technology of Plasticity. The Minerals, Metals & Materials Series.}}, editor = {{Daehn, Glenn and Cao, Jian and Kinsey, Brad and Tekkaya, Erman and Vivek, Anupam and Yoshida, Yoshinori}}, keywords = {{Self-piercing riveting, Lightweight design, Deformation behaviour, Stainless steel, High nitrogen steel}}, pages = {{1495--1506}}, publisher = {{Springer}}, title = {{{Self-Piercing Riveting Using Rivets Made of Stainless Steel with High Strain Hardening}}}, doi = {{10.1007/978-3-030-75381-8_124}}, year = {{2021}}, } @inproceedings{22274, abstract = {{The use of high-strength steel and aluminium is rising due to the intensified efforts being made in lightweight design, and self-piercing riveting is becoming increasingly important. Conventional rivets for self-piercing riveting differ in their geometry, the material used, the condition of the material and the coating. To shorten the manufacturing process, the use of stainless steel with high strain hardening as the rivet material represents a promising approach. This allows the coating of the rivets to be omitted due to the corrosion resistance of the material and, since the strength of the stainless steel is achieved by cold forming, heat treatment is no longer required. In addition, it is possible to adjust the local strength within the rivet. 
Because of that, the authors have elaborated a concept for using high nitrogen steel 1.3815 as the rivet material. The present investigation focusses on the joint strength in order to evaluate the capability of rivets in high nitrogen steel by comparison to conventional rivets made of treatable steel. Due to certain challenges in the forming process of the high nitrogen steel rivets, deviations result from the targeted rivet geometry. Mainly these deviations cause a lower joint strength with these rivets, which is, however, adequate. All in all, the capability of the new rivet is proven by the results of this investigation. }}, author = {{Uhe, Benedikt and Kuball, Clara-Maria and Merklein, Marion and Meschut, Gerson}}, keywords = {{Self-piercing Riveting, Joining Technology, Rivet Geometry, Rivet Material, High Nitrogen Steel, Joint Strength}}, location = {{Liège, Belgien}}, title = {{{Strength of self-piercing riveted Joints with conventional Rivets and Rivets made of High Nitrogen Steel}}}, doi = {{10.25518/esaform21.1911}}, year = {{2021}}, } @article{21436, abstract = {{Ultrasonic wire bonding is a solid-state joining process, used in the electronics industry to form electrical connections, e.g. to connect electrical terminals within semiconductor modules. Many process parameters affect the bond strength, such like the bond normal force, ultrasonic power, wire material and bonding frequency. Today, process design, development, and optimization is most likely based on the knowledge of process engineers and is mainly performed by experimental testing. In this contribution, a newly developed simulation tool is presented, to reduce time and costs and efficiently determine optimized process parameter. 
Based on a co-simulation of MATLAB and ANSYS, the different physical phenomena of the wire bonding process are considered using finite element simulation for the complex plastic deformation of the wire and reduced order models for the transient dynamics of the transducer, wire, substrate and bond formation. The model parameters such as the coefficients of friction between bond tool and wire and between wire and substrate were determined for aluminium and copper wire in experiments with a test rig specially developed for the requirements of heavy wire bonding. To reduce simulation time, for the finite element simulation a restart analysis and high performance computing is utilized. Detailed analysis of the bond formation showed, that the normal pressure distribution in the contact between wire and substrate has high impact on bond formation and distribution of welded areas in the contact area.}}, author = {{Schemmel, Reinhard and Krieger, Viktor and Hemsel, Tobias and Sextro, Walter}}, issn = {{0026-2714}}, journal = {{Microelectronics Reliability}}, keywords = {{Ultrasonic heavy wire bonding, Co-simulation, ANSYS, MATLAB, Process optimization, Friction coefficient, Copper-copper, Aluminium-copper}}, pages = {{114077}}, title = {{{Co-simulation of MATLAB and ANSYS for ultrasonic wire bonding process optimization}}}, doi = {{10.1016/j.microrel.2021.114077}}, volume = {{119}}, year = {{2021}}, } @article{46318, abstract = {{Multi-objective (MO) optimization, i.e., the simultaneous optimization of multiple conflicting objectives, is gaining more and more attention in various research areas, such as evolutionary computation, machine learning (e.g., (hyper-)parameter optimization), or logistics (e.g., vehicle routing). Many works in this domain mention the structural problem property of multimodality as a challenge from two classical perspectives: (1) finding all globally optimal solution sets, and (2) avoiding to get trapped in local optima. 
Interestingly, these streams seem to transfer many traditional concepts of single-objective (SO) optimization into claims, assumptions, or even terminology regarding the MO domain, but mostly neglect the understanding of the structural properties as well as the algorithmic search behavior on a problem’s landscape. However, some recent works counteract this trend, by investigating the fundamentals and characteristics of MO problems using new visualization techniques and gaining surprising insights. Using these visual insights, this work proposes a step towards a unified terminology to capture multimodality and locality in a broader way than it is usually done. This enables us to investigate current research activities in multimodal continuous MO optimization and to highlight new implications and promising research directions for the design of benchmark suites, the discovery of MO landscape features, the development of new MO (or even SO) optimization algorithms, and performance indicators. For all these topics, we provide a review of ideas and methods but also an outlook on future challenges, research potential and perspectives that result from recent developments.}}, author = {{Grimme, Christian and Kerschke, Pascal and Aspar, Pelin and Trautmann, Heike and Preuss, Mike and Deutz, André H. and Wang, Hao and Emmerich, Michael}}, issn = {{0305-0548}}, journal = {{Computers \& Operations Research}}, keywords = {{Multimodal optimization, Multi-objective continuous optimization, Landscape analysis, Visualization, Benchmarking, Theory, Algorithms}}, pages = {{105489}}, title = {{{Peeking beyond peaks: Challenges and research potentials of continuous multimodal multi-objective optimization}}}, doi = {{10.1016/j.cor.2021.105489}}, volume = {{136}}, year = {{2021}}, } @inproceedings{48853, abstract = {{In practise, it is often desirable to provide the decision-maker with a rich set of diverse solutions of decent quality instead of just a single solution. 
In this paper we study evolutionary diversity optimization for the knapsack problem (KP). Our goal is to evolve a population of solutions that all have a profit of at least (1 - {$\epsilon$}) {$\cdot$} OPT, where OPT is the value of an optimal solution. Furthermore, they should differ in structure with respect to an entropy-based diversity measure. To this end we propose a simple ({$\mu$} + 1)-EA with initial approximate solutions calculated by a well-known FPTAS for the KP. We investigate the effect of different standard mutation operators and introduce biased mutation and crossover which puts strong probability on flipping bits of low and/or high frequency within the population. An experimental study on different instances and settings shows that the proposed mutation operators in most cases perform slightly inferior in the long term, but show strong benefits if the number of function evaluations is severely limited.}}, author = {{Bossek, Jakob and Neumann, Aneta and Neumann, Frank}}, booktitle = {{Proceedings of the Genetic and Evolutionary Computation Conference}}, isbn = {{978-1-4503-8350-9}}, keywords = {{evolutionary algorithms, evolutionary diversity optimization, knapsack problem, tailored operators}}, pages = {{556--564}}, publisher = {{Association for Computing Machinery}}, title = {{{Breeding Diverse Packings for the Knapsack Problem by Means of Diversity-Tailored Evolutionary Algorithms}}}, doi = {{10.1145/3449639.3459364}}, year = {{2021}}, } @inproceedings{48860, abstract = {{In the area of evolutionary computation the calculation of diverse sets of high-quality solutions to a given optimization problem has gained momentum in recent years under the term evolutionary diversity optimization. Theoretical insights into the working principles of baseline evolutionary algorithms for diversity optimization are still rare. 
In this paper we study the well-known Minimum Spanning Tree problem (MST) in the context of diversity optimization where population diversity is measured by the sum of pairwise edge overlaps. Theoretical results provide insights into the fitness landscape of the MST diversity optimization problem pointing out that even for a population of {$\mu$} = 2 fitness plateaus (of constant length) can be reached, but nevertheless diverse sets can be calculated in polynomial time. We supplement our theoretical results with a series of experiments for the unconstrained and constraint case where all solutions need to fulfill a minimal quality threshold. Our results show that a simple ({$\mu$} + 1)-EA can effectively compute a diversified population of spanning trees of high quality.}}, author = {{Bossek, Jakob and Neumann, Frank}}, booktitle = {{Proceedings of the Genetic and Evolutionary Computation Conference}}, isbn = {{978-1-4503-8350-9}}, keywords = {{evolutionary algorithms, evolutionary diversity optimization, minimum spanning tree, runtime analysis}}, pages = {{198–206}}, publisher = {{Association for Computing Machinery}}, title = {{{Evolutionary Diversity Optimization and the Minimum Spanning Tree Problem}}}, doi = {{10.1145/3449639.3459363}}, year = {{2021}}, } @article{48854, abstract = {{We contribute to the theoretical understanding of randomized search heuristics for dynamic problems. We consider the classical vertex coloring problem on graphs and investigate the dynamic setting where edges are added to the current graph. We then analyze the expected time for randomized search heuristics to recompute high quality solutions. The (1+1) Evolutionary Algorithm and RLS operate in a setting where the number of colors is bounded and we are minimizing the number of conflicts. Iterated local search algorithms use an unbounded color palette and aim to use the smallest colors and, consequently, the smallest number of colors. 
We identify classes of bipartite graphs where reoptimization is as hard as or even harder than optimization from scratch, i.e., starting with a random initialization. Even adding a single edge can lead to hard symmetry problems. However, graph classes that are hard for one algorithm turn out to be easy for others. In most cases our bounds show that reoptimization is faster than optimizing from scratch. We further show that tailoring mutation operators to parts of the graph where changes have occurred can significantly reduce the expected reoptimization time. In most settings the expected reoptimization time for such tailored algorithms is linear in the number of added edges. However, tailored algorithms cannot prevent exponential times in settings where the original algorithm is inefficient.}}, author = {{Bossek, Jakob and Neumann, Frank and Peng, Pan and Sudholt, Dirk}}, issn = {{0178-4617}}, journal = {{Algorithmica}}, keywords = {{Dynamic optimization, Evolutionary algorithms, Running time analysis}}, number = {{10}}, pages = {{3148–3179}}, title = {{{Time Complexity Analysis of Randomized Search Heuristics for the Dynamic Graph Coloring Problem}}}, doi = {{10.1007/s00453-021-00838-3}}, volume = {{83}}, year = {{2021}}, } @inproceedings{19609, abstract = {{Modern services comprise interconnected components, e.g., microservices in a service mesh, that can scale and run on multiple nodes across the network on demand. To process incoming traffic, service components have to be instantiated and traffic assigned to these instances, taking capacities and changing demands into account. This challenge is usually solved with custom approaches designed by experts. While this typically works well for the considered scenario, the models often rely on unrealistic assumptions or on knowledge that is not available in practice (e.g., a priori knowledge). 
We propose a novel deep reinforcement learning approach that learns how to best coordinate services and is geared towards realistic assumptions. It interacts with the network and relies on available, possibly delayed monitoring information. Rather than defining a complex model or an algorithm how to achieve an objective, our model-free approach adapts to various objectives and traffic patterns. An agent is trained offline without expert knowledge and then applied online with minimal overhead. Compared to a state-of-the-art heuristic, it significantly improves flow throughput and overall network utility on real-world network topologies and traffic traces. It also learns to optimize different objectives, generalizes to scenarios with unseen, stochastic traffic patterns, and scales to large real-world networks.}}, author = {{Schneider, Stefan Balthasar and Manzoor, Adnan and Qarawlus, Haydar and Schellenberg, Rafael and Karl, Holger and Khalili, Ramin and Hecker, Artur}}, booktitle = {{IEEE International Conference on Network and Service Management (CNSM)}}, keywords = {{self-driving networks, self-learning, network coordination, service coordination, reinforcement learning, deep learning, nfv}}, publisher = {{IEEE}}, title = {{{Self-Driving Network and Service Coordination Using Deep Reinforcement Learning}}}, year = {{2020}}, } @proceedings{19976, abstract = {{The aim to reduce pollutant emission has led to a trend towards lightweight construction in car body development during the last years. As a consequence of the resulting need for multi-material design, mechanical joining technologies become increasingly important. Mechanical joining allows for the combination of dissimilar materials, while thermic joining techniques reach their limits. Self-piercing riveting enables the joining of dissimilar materials by using semi-tubular rivets as mechanical fasteners. 
The rivet production, however, is costly and time-consuming, as the rivets generally have to be hardened, tempered and coated after forming, in order to achieve an adequate strength and corrosion resistance. A promising approach to improve the efficiency of the rivet manufacturing is the use of high-strength high nitrogen steel as rivet material because these additional process steps would not be necessary anymore. As a result of the comparatively high nitrogen content, such steels have various beneficial properties like higher strength, good ductility and improved corrosion resistance. By cold bulk forming of high nitrogen steels high-strength parts can be manufactured due to the strengthening which is caused by the high strain hardening. However, high tool loads thereby have to be expected and are a major challenge during the production process. Consequently, there is a need for appropriate forming strategies. This paper presents key aspects concerning the process design for the manufacturing of semi-tubular self-piercing rivets made of high-strength steel. The aim is to produce the rivets in several forming stages without intermediate heat treatment between the single stages. Due to the high strain hardening of the material, a two stage forming concept will be investigated. Cup-backward extrusion is chosen as the first process step in order to form the rivet shank without forming the rivet foot. Thus, the strain hardening effects in the area of the rivet foot are minimized and the tool loads during the following process step can be reduced. During the second and final forming stage the detailed geometry of the rivet foot and the rivet head is formed. In this context, the effect of different variations, for example concerning the final geometry of the rivet foot, on the tool load is investigated using multistage numerical analysis. Furthermore, the influence of the process temperature on occurring stresses is analysed. 
Based on the results of the investigations, an adequate forming strategy and a tool concept for the manufacturing of semi-tubular self-piercing rivets made of high-strength steel are presented.}}, editor = {{Kuball, Clara-Maria and Uhe, Benedikt and Meschut, Gerson and Merklein, Marion}}, keywords = {{high nitrogen steel, self-piercing riveting, joining by forming, bulk forming, tool design}}, pages = {{280--285}}, title = {{{Process design for the forming of semi-tubular self-piercing rivets made of high nitrogen steel}}}, doi = {{10.1016/j.promfg.2020.08.052}}, volume = {{50}}, year = {{2020}}, } @inproceedings{17370, abstract = {{ We consider a natural extension to the metric uncapacitated Facility Location Problem (FLP) in which requests ask for different commodities out of a finite set \( S \) of commodities. Ravi and Sinha (SODA 2004) introduced the model as the \emph{Multi-Commodity Facility Location Problem} (MFLP) and considered it an offline optimization problem. The model itself is similar to the FLP: i.e., requests are located at points of a finite metric space and the task of an algorithm is to construct facilities and assign requests to facilities while minimizing the construction cost and the sum over all assignment distances. In addition, requests and facilities are heterogeneous; they request or offer multiple commodities out of $S$. A request has to be connected to a set of facilities jointly offering the commodities demanded by it. In comparison to the FLP, an algorithm has to decide not only if and where to place facilities, but also which commodities to offer at each. To the best of our knowledge we are the first to study the problem in its online variant in which requests, their positions and their commodities are not known beforehand but revealed over time. We present results regarding the competitive ratio. 
On the one hand, we show that heterogeneity influences the competitive ratio by developing a lower bound on the competitive ratio for any randomized online algorithm of \( \Omega ( \sqrt{|S|} + \frac{\log n}{\log \log n} ) \) that already holds for simple line metrics. Here, \( n \) is the number of requests. On the other side, we establish a deterministic \( \mathcal{O}(\sqrt{|S|} \cdot \log n) \)-competitive algorithm and a randomized \( \mathcal{O}(\sqrt{|S|} \cdot \frac{\log n}{\log \log n} ) \)-competitive algorithm. Further, we show that when considering a more special class of cost functions for the construction cost of a facility, the competitive ratio decreases given by our deterministic algorithm depending on the function.}}, author = {{Castenow, Jannik and Feldkord, Björn and Knollmann, Till and Malatyali, Manuel and Meyer auf der Heide, Friedhelm}}, booktitle = {{Proceedings of the 32nd ACM Symposium on Parallelism in Algorithms and Architectures}}, isbn = {{9781450369350}}, keywords = {{Online Multi-Commodity Facility Location, Competitive Ratio, Online Optimization, Facility Location Problem}}, title = {{{The Online Multi-Commodity Facility Location Problem}}}, doi = {{10.1145/3350755.3400281}}, year = {{2020}}, } @article{20143, author = {{Otroshi, Mortaza and Rossel, Moritz and Meschut, Gerson}}, journal = {{Journal of Advanced Joining Processes}}, keywords = {{Self-pierce riveting, Ductile fracture, Damage modeling, GISSMO damage model}}, publisher = {{Elsevier}}, title = {{{Stress state dependent damage modeling of self-pierce riveting process simulation using GISSMO damage model}}}, doi = {{10.1016/j.jajp.2020.100015}}, volume = {{1}}, year = {{2020}}, } @article{35298, abstract = {{Im Artikel werden drei verschiedene Lernzugänge (kompetenzorientiertes, ästhetisches und biographisches Lernen) vorgestellt und aus theoretischer Perspektive deren motivierender Gehalt für selbstreguliertes Lernen in Praxisphasen des Lehramtsstudiums herausgearbeitet. 
Als theoretische Grundlage dient die Selbstbestimmungstheorie als zentrale motivationale Theorie zur Erklärung selbstbestimmten Handelns.}}, author = {{Caruso, Carina and Adammek, Christine and Bonanati, Sabrina and Wiescholek, Sybille}}, issn = {{2625-0675}}, journal = {{Herausforderung Lehrer*innenbildung - Zeitschrift Zur Konzeption, Gestaltung Und Diskussion}}, keywords = {{ästhetische Forschung, Biographiearbeit, Praxissemester, Professionalisierung, selbstreguliertes Lernen, Motivation / aesthetic research, biographical work, long-term internship, professionalization, self-regulated learning, motivation}}, number = {{1}}, pages = {{18--33}}, title = {{{Motivierende Lernzugänge als Ausgangspunkt der Professionalisierung angehender Lehrer_innen}}}, doi = {{10.4119/hlz-2540}}, volume = {{3}}, year = {{2020}}, } @article{33866, abstract = {{Helmholtz–Kirchhoff equations of motions of vortices of an incompressible fluid in the plane define a dynamics with singularities and this leads to a Zermelo navigation problem describing the ship travel in such a field where the control is the heading angle. Considering one vortex, we define a time minimization problem which can be analyzed with the technics of geometric optimal control combined with numerical simulations, the geometric frame being the extension of Randers metrics in the punctured plane, with rotational symmetry. Candidates as minimizers are parameterized thanks to the Pontryagin Maximum Principle as extremal solutions of a Hamiltonian vector field. We analyze the time minimal solution to transfer the ship between two points where during the transfer the ship can be either in a strong current region in the vicinity of the vortex or in a weak current region. The analysis is based on a micro-local classification of the extremals using mainly the integrability properties of the dynamics due to the rotational symmetry.
The discussion is complex and related to the existence of an isolated extremal (Reeb) circle due to the vortex singularity. The explicit computation of cut points where the extremal curves cease to be optimal is given and the spheres are described in the case where at the initial point the current is weak.}}, author = {{Bonnard, Bernard and Cots, Olivier and Wembe Moafo, Boris Edgar}}, issn = {{1292-8119}}, journal = {{ESAIM: Control, Optimisation and Calculus of Variations}}, keywords = {{Computational Mathematics, Control and Optimization, Control and Systems Engineering}}, publisher = {{EDP Sciences}}, title = {{{A Zermelo navigation problem with a vortex singularity}}}, doi = {{10.1051/cocv/2020058}}, volume = {{27}}, year = {{2020}}, } @article{19974, abstract = {{Due to the trend towards lightweight design in car body development mechanical joining technologies become increasingly important. These techniques allow for the joining of dissimilar materials and thus enable multi-material design, while thermic joining methods reach their limits. Semi-tubular self-piercing riveting is an important mechanical joining technology. The rivet production, however, is costly and time-consuming, as the process consists of several process steps including the heat treatment and coating of the rivets in order to achieve an adequate strength and corrosion resistance. The use of high nitrogen steel as rivet material leads to the possibility of reducing process steps and hence increasing the efficiency of the process. However, the high tool loads being expected due to the high strain hardening of the material are a major challenge during the rivet production. Thus, there is a need for appropriate forming strategies, such as the manufacturing of the rivets at elevated temperatures. Prior investigations led to the conclusion that forming already at 200 °C results in a distinct reduction of the yield strength. 
To create a deeper understanding of the forming behaviour of high nitrogen steel at elevated temperatures, compression tests were conducted in a temperature range between room temperature and 200 °C. The determined true stress – true strain curves are the basis for the further process and tool design of the rivet production. Another key factor for the rivet manufacturing at elevated temperatures is the influence of the process temperature on the tribological conditions. For this reason, ring compression tests at room temperature and 200 °C are carried out. The friction factors are determined on the basis of calibration curves resulting from the numerical analysis of the ring compression process. The investigations indicate that the friction factor at 200 °C is significantly higher compared to room temperature. This essential fact has to be taken into account for the process and tool design for the rivet production using high nitrogen steel.}}, author = {{Kuball, Clara-Maria and Jung, R and Uhe, Benedikt and Meschut, Gerson and Merklein, Marion}}, journal = {{Journal of Advanced Joining Processes}}, keywords = {{High nitrogen steel, Self-piercing riveting, Joining by forming, Bulk forming, Strain hardening}}, title = {{{Influence of the process temperature on the forming behaviour and the friction during bulk forming of high nitrogen steel}}}, doi = {{10.1016/j.jajp.2020.100023}}, volume = {{1}}, year = {{2020}}, } @article{19973, abstract = {{As a result of lightweight design, increased use is being made of high-strength steel and aluminium in car bodies. Self-piercing riveting is an established technique for joining these materials. The dissimilar properties of the two materials have led to a number of different rivet geometries in the past. Each rivet geometry fulfils the requirements of the materials within a limited range. 
In the present investigation, an improved rivet geometry is developed, which permits the reliable joining of two material combinations that could only be joined by two different rivet geometries up until now. Material combination 1 consists of high-strength steel on both sides, while material combination 2 comprises aluminium on the punch side and high-strength steel on the die side. The material flow and the stress and strain conditions prevailing during the joining process are analysed by means of numerical simulation. The rivet geometry is then improved step-by-step on the basis of this analysis. Finally, the improved rivet geometry is manufactured and the findings of the investigation are verified in experimental joining tests.}}, author = {{Uhe, Benedikt and Kuball, Clara-Maria and Merklein, Marion and Meschut, Gerson}}, journal = {{Production Engineering}}, keywords = {{Self-piercing riveting, Joining technology, Rivet geometry, Multi-material design, High-strength steel, Aluminium}}, pages = {{417--423}}, title = {{{Improvement of a rivet geometry for the self-piercing riveting of high-strength steel and multi-material joints}}}, doi = {{10.1007/s11740-020-00973-w}}, volume = {{14}}, year = {{2020}}, } @article{46334, abstract = {{We build upon a recently proposed multi-objective view onto performance measurement of single-objective stochastic solvers. The trade-off between the fraction of failed runs and the mean runtime of successful runs – both to be minimized – is directly analyzed based on a study on algorithm selection of inexact state-of-the-art solvers for the famous Traveling Salesperson Problem (TSP). Moreover, we adopt the hypervolume indicator (HV) commonly used in multi-objective optimization for simultaneously assessing both conflicting objectives and investigate relations to commonly used performance indicators, both theoretically and empirically. 
Next to Penalized Average Runtime (PAR) and Penalized Quantile Runtime (PQR), the HV measure is used as a core concept within the construction of per-instance algorithm selection models offering interesting insights into complementary behavior of inexact TSP solvers.}}, author = {{Bossek, Jakob and Kerschke, Pascal and Trautmann, Heike}}, issn = {{1568-4946}}, journal = {{Applied Soft Computing}}, keywords = {{Algorithm selection, Multi-objective optimization, Performance measurement, Combinatorial optimization, Traveling Salesperson Problem}}, pages = {{105901}}, title = {{{A multi-objective perspective on performance assessment and automated selection of single-objective optimization algorithms}}}, doi = {{10.1016/j.asoc.2019.105901}}, volume = {{88}}, year = {{2020}}, } @inproceedings{48847, abstract = {{Dynamic optimization problems have gained significant attention in evolutionary computation as evolutionary algorithms (EAs) can easily adapt to changing environments. We show that EAs can solve the graph coloring problem for bipartite graphs more efficiently by using dynamic optimization. In our approach the graph instance is given incrementally such that the EA can reoptimize its coloring when a new edge introduces a conflict. We show that, when edges are inserted in a way that preserves graph connectivity, Randomized Local Search (RLS) efficiently finds a proper 2-coloring for all bipartite graphs. This includes graphs for which RLS and other EAs need exponential expected time in a static optimization scenario. We investigate different ways of building up the graph by popular graph traversals such as breadth-first-search and depth-first-search and analyse the resulting runtime behavior. We further show that offspring populations (e. g. a (1 + {$\lambda$}) RLS) lead to an exponential speedup in {$\lambda$}. 
Finally, an island model using 3 islands succeeds in an optimal time of {$\Theta$}(m) on every m-edge bipartite graph, outperforming offspring populations. This is the first example where an island model guarantees a speedup that is not bounded in the number of islands.}}, author = {{Bossek, Jakob and Neumann, Frank and Peng, Pan and Sudholt, Dirk}}, booktitle = {{Proceedings of the Genetic and Evolutionary Computation Conference}}, isbn = {{978-1-4503-7128-5}}, keywords = {{dynamic optimization, evolutionary algorithms, running time analysis, theory}}, pages = {{1277–1285}}, publisher = {{Association for Computing Machinery}}, title = {{{More Effective Randomized Search Heuristics for Graph Coloring through Dynamic Optimization}}}, doi = {{10.1145/3377930.3390174}}, year = {{2020}}, } @inproceedings{48849, abstract = {{One-shot optimization tasks require to determine the set of solution candidates prior to their evaluation, i.e., without possibility for adaptive sampling. We consider two variants, classic one-shot optimization (where our aim is to find at least one solution of high quality) and one-shot regression (where the goal is to fit a model that resembles the true problem as well as possible). For both tasks it seems intuitive that well-distributed samples should perform better than uniform or grid-based samples, since they show a better coverage of the decision space. In practice, quasi-random designs such as Latin Hypercube Samples and low-discrepancy point sets are indeed very commonly used designs for one-shot optimization tasks. We study in this work how well low star discrepancy correlates with performance in one-shot optimization. Our results confirm an advantage of low-discrepancy designs, but also indicate the correlation between discrepancy values and overall performance is rather weak. We then demonstrate that commonly used designs may be far from optimal. 
More precisely, we evolve 24 very specific designs that each achieve good performance on one of our benchmark problems. Interestingly, we find that these specifically designed samples yield surprisingly good performance across the whole benchmark set. Our results therefore give strong indication that significant performance gains over state-of-the-art one-shot sampling techniques are possible, and that evolutionary algorithms can be an efficient means to evolve these.}}, author = {{Bossek, Jakob and Doerr, Carola and Kerschke, Pascal and Neumann, Aneta and Neumann, Frank}}, booktitle = {{Parallel Problem Solving from Nature (PPSN XVI)}}, isbn = {{978-3-030-58111-4}}, keywords = {{Continuous optimization, Fully parallel search, One-shot optimization, Regression, Surrogate-assisted optimization}}, pages = {{111–124}}, publisher = {{Springer-Verlag}}, title = {{{Evolving Sampling Strategies for One-Shot Optimization Tasks}}}, doi = {{10.1007/978-3-030-58112-1_8}}, year = {{2020}}, } @inproceedings{48851, abstract = {{Several important optimization problems in the area of vehicle routing can be seen as variants of the classical Traveling Salesperson Problem (TSP). In the area of evolutionary computation, the Traveling Thief Problem (TTP) has gained increasing interest over the last 5 years. In this paper, we investigate the effect of weights on such problems, in the sense that the cost of traveling increases with respect to the weights of nodes already visited during a tour. This provides abstractions of important TSP variants such as the Traveling Thief Problem and time dependent TSP variants, and allows to study precisely the increase in difficulty caused by weight dependence. We provide a 3.59-approximation for this weight dependent version of TSP with metric distances and bounded positive weights. 
Furthermore, we conduct experimental investigations for simple randomized local search with classical mutation operators and two variants of the state-of-the-art evolutionary algorithm EAX adapted to the weighted TSP. Our results show the impact of the node weights on the position of the nodes in the resulting tour.}}, author = {{Bossek, Jakob and Casel, Katrin and Kerschke, Pascal and Neumann, Frank}}, booktitle = {{Proceedings of the Genetic and Evolutionary Computation Conference}}, isbn = {{978-1-4503-7128-5}}, keywords = {{dynamic optimization, evolutionary algorithms, running time analysis, theory}}, pages = {{1286–1294}}, publisher = {{Association for Computing Machinery}}, title = {{{The Node Weight Dependent Traveling Salesperson Problem: Approximation Algorithms and Randomized Search Heuristics}}}, doi = {{10.1145/3377930.3390243}}, year = {{2020}}, } @inproceedings{48845, abstract = {{In practice, e.g. in delivery and service scenarios, Vehicle-Routing-Problems (VRPs) often imply repeated decision making on dynamic customer requests. As in classical VRPs, tours have to be planned short while the number of serviced customers has to be maximized at the same time resulting in a multi-objective problem. Beyond that, however, dynamic requests lead to the need for re-planning of not yet realized tour parts, while already realized tour parts are irreversible. In this paper we study this type of bi-objective dynamic VRP including sequential decision making and concurrent realization of decisions. We adopt a recently proposed Dynamic Evolutionary Multi-Objective Algorithm (DEMOA) for a related VRP problem and extend it to the more realistic (here considered) scenario of multiple vehicles. 
We empirically show that our DEMOA is competitive with a multi-vehicle offline and clairvoyant variant of the proposed DEMOA as well as with the dynamic single-vehicle approach proposed earlier.}}, author = {{Bossek, Jakob and Grimme, Christian and Trautmann, Heike}}, booktitle = {{Proceedings of the Genetic and Evolutionary Computation Conference}}, isbn = {{978-1-4503-7128-5}}, keywords = {{decision making, dynamic optimization, evolutionary algorithms, multi-objective optimization, vehicle routing}}, pages = {{166–174}}, publisher = {{Association for Computing Machinery}}, title = {{{Dynamic Bi-Objective Routing of Multiple Vehicles}}}, doi = {{10.1145/3377930.3390146}}, year = {{2020}}, } @inproceedings{48850, abstract = {{Sequential model-based optimization (SMBO) approaches are algorithms for solving problems that require computationally or otherwise expensive function evaluations. The key design principle of SMBO is a substitution of the true objective function by a surrogate, which is used to propose the point(s) to be evaluated next. SMBO algorithms are intrinsically modular, leaving the user with many important design choices. Significant research efforts go into understanding which settings perform best for which type of problems. Most works, however, focus on the choice of the model, the acquisition function, and the strategy used to optimize the latter. The choice of the initial sampling strategy, however, receives much less attention. Not surprisingly, quite diverging recommendations can be found in the literature. We analyze in this work how the size and the distribution of the initial sample influences the overall quality of the efficient global optimization (EGO) algorithm, a well-known SMBO approach. While, overall, small initial budgets using Halton sampling seem preferable, we also observe that the performance landscape is rather unstructured. We furthermore identify several situations in which EGO performs unfavorably against random sampling. 
Both observations indicate that an adaptive SMBO design could be beneficial, making SMBO an interesting test-bed for automated algorithm design.}}, author = {{Bossek, Jakob and Doerr, Carola and Kerschke, Pascal}}, booktitle = {{Proceedings of the Genetic and Evolutionary Computation Conference}}, isbn = {{978-1-4503-7128-5}}, keywords = {{continuous black-box optimization, design of experiments, initial design, sequential model-based optimization}}, pages = {{778–786}}, publisher = {{Association for Computing Machinery}}, title = {{{Initial Design Strategies and Their Effects on Sequential Model-Based Optimization: An Exploratory Case Study Based on BBOB}}}, doi = {{10.1145/3377930.3390155}}, year = {{2020}}, } @article{48848, abstract = {{We build upon a recently proposed multi-objective view onto performance measurement of single-objective stochastic solvers. The trade-off between the fraction of failed runs and the mean runtime of successful runs \textendash both to be minimized \textendash is directly analyzed based on a study on algorithm selection of inexact state-of-the-art solvers for the famous Traveling Salesperson Problem (TSP). Moreover, we adopt the hypervolume indicator (HV) commonly used in multi-objective optimization for simultaneously assessing both conflicting objectives and investigate relations to commonly used performance indicators, both theoretically and empirically. Next to Penalized Average Runtime (PAR) and Penalized Quantile Runtime (PQR), the HV measure is used as a core concept within the construction of per-instance algorithm selection models offering interesting insights into complementary behavior of inexact TSP solvers. \textbullet The multi-objective perspective is naturally generalizable to multiple objectives. \textbullet Proof of relationship between HV and the PAR in the considered bi-objective space. 
\textbullet New insights into complementary behavior of stochastic optimization algorithms.}}, author = {{Bossek, Jakob and Kerschke, Pascal and Trautmann, Heike}}, issn = {{1568-4946}}, journal = {{Applied Soft Computing}}, keywords = {{Algorithm selection, Combinatorial optimization, Multi-objective optimization, Performance measurement, Traveling Salesperson Problem}}, number = {{C}}, title = {{{A Multi-Objective Perspective on Performance Assessment and Automated Selection of Single-Objective Optimization Algorithms}}}, doi = {{10.1016/j.asoc.2019.105901}}, volume = {{88}}, year = {{2020}}, } @article{4562, abstract = {{Employing main and sector-specific investment-grade CDS indices from the North American and European CDS market and performing mean-variance out-of-sample analyses for conservative and aggressive investors over the period from 2006 to 2014, this paper analyzes portfolio benefits of adding corporate CDS indices to a traditional financial portfolio consisting of stock and sovereign bond indices. As a baseline result, we initially find an increase in portfolio (downside) risk-diversification when adding CDS indices, which is observed irrespective of both CDS markets, investor-types and different sub-periods, including the global financial crisis and European sovereign debt crisis. In addition, the analysis reveals higher portfolio excess returns and performance in CDS index portfolios, however, these effects clearly differ between markets, investor-types and sub-periods. Overall, portfolio benefits of adding CDS indices mainly result from the fact that institutional investors replace sovereign bond indices rather than stock indices by CDS indices due to better risk-return characteristics. Our baseline findings remain robust under a variety of robustness checks. 
Results from sensitivity analyses provide further important implications for institutional investors with a strategic focus on a long-term conservative portfolio management.}}, author = {{Hippert, Benjamin and Uhde, André and Wengerek, Sascha Tobias}}, journal = {{Review of Derivatives Research}}, keywords = {{Corporate credit default swap indices, Mean-variance asset allocation, Out-of-sample portfolio optimization, Portfolio risk-diversification, Portfolio performance evaluation}}, number = {{2}}, pages = {{203--259}}, title = {{{Portfolio Benefits of Adding Corporate Credit Default Swap Indices: Evidence from North America and Europe}}}, doi = {{10.1007/s11147-018-9148-8}}, volume = {{22}}, year = {{2019}}, } @phdthesis{10000, abstract = {{Ultraschall wird zur Effizienzsteigerung in verfahrenstechnischen Prozessen eingesetzt. Die Betriebsparameter der Ultraschallsysteme werden empirisch ermittelt, da derzeit keine systematische Analyse der Wechselwirkung zwischen Ultraschallwandler und Schallfeld sowie kein Verfahren zur Messung der Kavitationsaktivität ohne zusätzlichen Sensor existieren. Auf Basis einer experimentellen Analyse des betrachteten sonochemischen Reaktors wird ein Finite-Elemente-Modell aufgebaut, das die Wechselwirkung zwischen Schallfeld und Ultraschallwandler berücksichtigt. Die modellbasierte Analyse zeigt, dass wegen der akustischen Eigenschaften des Autoklavs nur direkt an der Sonotrode Kavitation entsteht. Die Wechselwirkung zwischen Ultraschallwandler und Schallfeld ermöglicht Aussagen über das Schallfeld und die Kavitationsaktivität auf Basis der Rückwirkung auf den Ultraschallwandler. Die lineare Schalldruckverteilung ermöglicht eine Prognose über die Verteilung von Kavitationszonen. Das beschriebene Modell liefert wertvolle Erkenntnisse für die Auslegung, Analyse und Skalierung sonochemischer Reaktoren. 
Auf Grund der rauen Prozessrandbedingungen ist die Applikation von Sensoren zur Überwachung der Kavitationsaktivität in vielen sonochemischen Prozessen nicht möglich. Zur prozessbegleitenden Messung der Kavitationsaktivität wird ein Verfahren entwickelt, das die Bewertung der Kavitationsaktivität durch Auswertung der Rückwirkung auf den Ultraschallwandler erlaubt. Das Messverfahren ermöglicht eine vorhersagbare und reproduzierbare Durchführung kavitationsbasierter Prozesse und stellt eine wichtige Erweiterung für bestehende und neue Ultraschallsysteme dar.}}, author = {{Bornmann, Peter}}, keywords = {{Sonochemie, Akustische Kavitation, Kavitationsmessung, Kavitationsdetektion, FEM-Simulation Ultraschallwandler, Prozessüberwachung, FEM-Simulation Schallfeld, Self-Sensing, Piezoelektrische Ultraschallwandler, Ultraschallreinigung}}, publisher = {{Shaker}}, title = {{{Modellierung und experimentelle Charakterisierung der Wechselwirkung zwischen Ultraschallwandler und Flüssigkeit in kavitationsbasierten Prozessen}}}, year = {{2019}}, } @inproceedings{48841, abstract = {{We tackle a bi-objective dynamic orienteering problem where customer requests arise as time passes by. The goal is to minimize the tour length traveled by a single delivery vehicle while simultaneously keeping the number of dismissed dynamic customers to a minimum. We propose a dynamic Evolutionary Multi-Objective Algorithm which is grounded on insights gained from a previous series of work on an a-posteriori version of the problem, where all request times are known in advance. In our experiments, we simulate different decision maker strategies and evaluate the development of the Pareto-front approximations on exemplary problem instances. 
It turns out, that despite severely reduced computational budget and no oracle-knowledge of request times the dynamic EMOA is capable of producing approximations which partially dominate the results of the a-posteriori EMOA and dynamic integer linear programming strategies.}}, author = {{Bossek, Jakob and Grimme, Christian and Meisel, Stephan and Rudolph, Günter and Trautmann, Heike}}, booktitle = {{Evolutionary Multi-Criterion Optimization (EMO)}}, editor = {{Deb, Kalyanmoy and Goodman, Erik and Coello Coello, Carlos A. and Klamroth, Kathrin and Miettinen, Kaisa and Mostaghim, Sanaz and Reed, Patrick}}, isbn = {{978-3-030-12598-1}}, keywords = {{Combinatorial optimization, Dynamic optimization, Metaheuristics, Multi-objective optimization, Vehicle routing}}, pages = {{516–528}}, publisher = {{Springer International Publishing}}, title = {{{Bi-Objective Orienteering: Towards a Dynamic Multi-objective Evolutionary Algorithm}}}, doi = {{10.1007/978-3-030-12598-1_41}}, year = {{2019}}, } @inproceedings{48842, abstract = {{Evolutionary algorithms have successfully been applied to evolve problem instances that exhibit a significant difference in performance for a given algorithm or a pair of algorithms inter alia for the Traveling Salesperson Problem (TSP). Creating a large variety of instances is crucial for successful applications in the blooming field of algorithm selection. In this paper, we introduce new and creative mutation operators for evolving instances of the TSP. We show that adopting those operators in an evolutionary algorithm allows for the generation of benchmark sets with highly desirable properties: (1) novelty by clear visual distinction to established benchmark sets in the field, (2) visual and quantitative diversity in the space of TSP problem characteristics, and (3) significant performance differences with respect to the restart versions of heuristic state-of-the-art TSP solvers EAX and LKH. 
The important aspect of diversity is addressed and achieved solely by the proposed mutation operators and not enforced by explicit diversity preservation.}}, author = {{Bossek, Jakob and Kerschke, Pascal and Neumann, Aneta and Wagner, Markus and Neumann, Frank and Trautmann, Heike}}, booktitle = {{Proceedings of the 15th ACM/SIGEVO Conference on Foundations of Genetic Algorithms}}, isbn = {{978-1-4503-6254-2}}, keywords = {{benchmarking, instance features, optimization, problem generation, traveling salesperson problem}}, pages = {{58–71}}, publisher = {{Association for Computing Machinery}}, title = {{{Evolving Diverse TSP Instances by Means of Novel and Creative Mutation Operators}}}, doi = {{10.1145/3299904.3340307}}, year = {{2019}}, } @inproceedings{48843, abstract = {{We contribute to the theoretical understanding of randomized search heuristics for dynamic problems. We consider the classical graph coloring problem and investigate the dynamic setting where edges are added to the current graph. We then analyze the expected time for randomized search heuristics to recompute high quality solutions. This includes the (1+1) EA and RLS in a setting where the number of colors is bounded and we are minimizing the number of conflicts as well as iterated local search algorithms that use an unbounded color palette and aim to use the smallest colors and - as a consequence - the smallest number of colors. We identify classes of bipartite graphs where reoptimization is as hard as or even harder than optimization from scratch, i. e. starting with a random initialization. Even adding a single edge can lead to hard symmetry problems. However, graph classes that are hard for one algorithm turn out to be easy for others. In most cases our bounds show that reoptimization is faster than optimizing from scratch. 
Furthermore, we show how to speed up computations by using problem specific operators concentrating on parts of the graph where changes have occurred.}}, author = {{Bossek, Jakob and Neumann, Frank and Peng, Pan and Sudholt, Dirk}}, booktitle = {{Proceedings of the Genetic and Evolutionary Computation Conference}}, isbn = {{978-1-4503-6111-8}}, keywords = {{dynamic optimization, evolutionary algorithms, running time analysis, theory}}, pages = {{1443–1451}}, publisher = {{Association for Computing Machinery}}, title = {{{Runtime Analysis of Randomized Search Heuristics for Dynamic Graph Coloring}}}, doi = {{10.1145/3321707.3321792}}, year = {{2019}}, } @inproceedings{48840, abstract = {{Research has shown that for many single-objective graph problems where optimum solutions are composed of low weight sub-graphs, such as the minimum spanning tree problem (MST), mutation operators favoring low weight edges show superior performance. Intuitively, similar observations should hold for multi-criteria variants of such problems. In this work, we focus on the multi-criteria MST problem. A thorough experimental study is conducted where we estimate the probability of edges being part of non-dominated spanning trees as a function of the edges’ non-domination level or domination count, respectively. Building on gained insights, we propose several biased one-edge-exchange mutation operators that differ in the used edge-selection probability distribution (biased towards edges of low rank). Our empirical analysis shows that among different graph types (dense and sparse) and edge weight types (both uniformly random and combinations of Euclidean and uniformly random) biased edge-selection strategies perform superior in contrast to the baseline uniform edge-selection. 
Our findings are in particular strong for dense graphs.}}, author = {{Bossek, Jakob and Grimme, Christian and Neumann, Frank}}, booktitle = {{Proceedings of the Genetic and Evolutionary Computation Conference}}, isbn = {{978-1-4503-6111-8}}, keywords = {{biased mutation, combinatorial optimization, minimum spanning tree, multi-objective optimization}}, pages = {{516–523}}, publisher = {{Association for Computing Machinery}}, title = {{{On the Benefits of Biased Edge-Exchange Mutation for the Multi-Criteria Spanning Tree Problem}}}, doi = {{10.1145/3321707.3321818}}, year = {{2019}}, } @inproceedings{1163, abstract = {{In this paper we present two major results: First, we introduce the first self-stabilizing version of a supervised overlay network (as introduced in~\cite{DBLP:conf/ispan/KothapalliS05}) by presenting a self-stabilizing supervised skip ring. Secondly, we show how to use the self-stabilizing supervised skip ring to construct an efficient self-stabilizing publish-subscribe system. That is, in addition to stabilizing the overlay network, every subscriber of a topic will eventually know all of the publications that have been issued so far for that topic. The communication work needed to processes a subscribe or unsubscribe operation is just a constant in a legitimate state, and the communication work of checking whether the system is still in a legitimate state is just a constant on expectation for the supervisor as well as any process in the system. 
}}, author = {{Feldmann, Michael and Kolb, Christina and Scheideler, Christian and Strothmann, Thim Frederik}}, booktitle = {{Proceedings of the 32nd IEEE International Parallel & Distributed Processing Symposium (IPDPS)}}, keywords = {{Topological Self-stabilization, Supervised Overlay, Publish-Subscribe System}}, location = {{Vancouver}}, publisher = {{IEEE}}, title = {{{Self-Stabilizing Supervised Publish-Subscribe Systems}}}, doi = {{10.1109/IPDPS.2018.00114}}, year = {{2018}}, } @inproceedings{5675, abstract = {{When responding to natural disasters, professional relief units are often supported by many volunteers which are not affiliated to humanitarian organizations. The effective coordination of these volunteers is crucial to leverage their capabilities and to avoid conflicts with professional relief units. In this paper, we empirically identify key requirements that professional relief units pose on this coordination. Based on these requirements, we suggest a decision model. We computationally solve a real-world instance of the model and empirically validate the computed solution in interviews with practitioners. Our results show that the suggested model allows for solving volunteer coordination tasks of realistic size near-optimally within short time, with the determined solution being well accepted by practitioners. 
We also describe in this article how the suggested decision support model is integrated in the volunteer coordination system which we develop in joint cooperation with a disaster management authority and a software development company.}}, author = {{Rauchecker, Gerhard and Schryen, Guido}}, booktitle = {{Proceedings of the 15th International Conference on Information Systems for Crisis Response and Management}}, keywords = {{Coordination of spontaneous volunteers, volunteer coordination system, decision support, scheduling optimization model, linear programming}}, location = {{Rochester, NY, USA}}, title = {{{Decision Support for the Optimal Coordination of Spontaneous Volunteers in Disaster Relief}}}, year = {{2018}}, } @inproceedings{4411, abstract = {{While a lot of research in distributed computing has covered solutions for self-stabilizing computing and topologies, there is far less work on self-stabilization for distributed data structures. Considering crashing peers in peer-to-peer networks, it should not be taken for granted that a distributed data structure remains intact. In this work, we present a self-stabilizing protocol for a distributed data structure called the hashed Patricia Trie (Kniesburges and Scheideler WALCOM'11) that enables efficient prefix search on a set of keys. The data structure has a wide area of applications including string matching problems while offering low overhead and efficient operations when embedded on top of a distributed hash table. Especially, longest prefix matching for $x$ can be done in $\mathcal{O}(\log |x|)$ hash table read accesses. We show how to maintain the structure in a self-stabilizing way. 
Our protocol assures low overhead in a legal state and a total (asymptotically optimal) memory demand of $\Theta(d)$ bits, where $d$ is the number of bits needed for storing all keys.}}, author = {{Knollmann, Till and Scheideler, Christian}}, booktitle = {{Proceedings of the 20th International Symposium on Stabilization, Safety, and Security of Distributed Systems (SSS)}}, editor = {{Izumi, Taisuke and Kuznetsov, Petr}}, keywords = {{Self-Stabilizing, Prefix Search, Distributed Data Structure}}, location = {{Tokyo}}, publisher = {{Springer, Cham}}, title = {{{A Self-Stabilizing Hashed Patricia Trie}}}, doi = {{10.1007/978-3-030-03232-6_1}}, volume = {{11201}}, year = {{2018}}, } @inproceedings{9999, abstract = {{Ultrasonic wire bonding is an indispensable process in the industrial manufacturing of semiconductor devices. Copper wire is increasingly replacing the well-established aluminium wire because of its superior electrical, thermal and mechanical properties. Copper wire processes differ significantly from aluminium processes and are more sensitive to disturbances, which reduces the range of parameter values suitable for a stable process. Disturbances can be compensated by an adaption of process parameters, but finding suitable parameters manually is difficult and time-consuming. This paper presents a physical model of the ultrasonic wire bonding process including the friction contact between tool and wire. This model yields novel insights into the process. A prototype of a multi-objective optimizing bonding machine (MOBM) is presented. 
It uses multi-objective optimization, based on the complete process model, to automatically select the best operating point as a compromise of concurrent objectives.}}, author = {{Unger, Andreas and Hunstig, Matthias and Meyer, Tobias and Brökelmann, Michael and Sextro, Walter}}, booktitle = {{Proceedings of IMAPS 2018 – 51st Symposium on Microelectronics, Pasadena, CA, 2018}}, keywords = {{wire bonding, multi-objective optimization, process model, copper wire, self-optimization}}, title = {{{Intelligent Production of Wire Bonds using Multi-Objective Optimization – Insights, Opportunities and Challenges}}}, doi = {{10.4071/2380-4505-2018.1.000572}}, number = {{1}}, pages = {{000572--000577}}, volume = {{2018}}, year = {{2018}}, } @article{32158, abstract = {{Sociogenesis addresses a pervasive problem in psychology given by Cartesian dualism that assigns the mental an inner locus apart from material activity. Aligning ourselves to the ongoing critical discussions of interiorization in psychology, we explore the crucial notion of space by highlighting language as sociocultural and dialogical activity performed by other-oriented individuals. We discuss space in terms of the “language spacetime”, a symbolic, embodied formation of mutually positioned speaking and listening selves. This leads beyond the “inside-outside” container metaphor and allows for a reformulation of interiorization. Interiorization is conceptualized as a continuous series of different, though mutually related movements between self and other and self and self that lead to and are supported by specific formations in language activity: reversion, transposition, and decoupling. 
Along a short passage of a video-based interview, we trace the reversion of dialogical positions within the addressivity constellation of the two interlocutors, their interactive creation of a heterotopic spacetime, and the decoupling of one speaker's psychological activity from the concrete here-and-now and the present other by moving and acting into this new sphere. Interiorization appears as a movement at the border of past, present, and possible future(s).}}, author = {{Bertau, Marie-Cécile and Karsten, Andrea}}, issn = {{0732-118X}}, journal = {{New Ideas in Psychology}}, keywords = {{Interiorization, Dialogical self, Language activity, Voice, Vygotsky, Heterotopia, Video-confrontation}}, pages = {{7--17}}, publisher = {{Elsevier BV}}, title = {{{Reconsidering interiorization: Self moving across language spacetimes}}}, doi = {{10.1016/j.newideapsych.2017.12.001}}, volume = {{49}}, year = {{2018}}, } @phdthesis{9994, abstract = {{Reliability-adaptive systems allow an adaptation of system behavior based on current system reliability. They can extend their lifetime at the cost of lowered performance or vice versa. This can be used to adapt failure behavior according to a maintenance plan, thus increasing availability while using up system capability fully. To facilitate setup, a control algorithm independent of a degradation model is desired. A closed loop control technique for reliability based on a health index, a measure for system degradation, is introduced. It uses self-optimization as means to implement behavior adaptation. This is based on selecting the priorities of objectives that the system pursues. Possible working points are computed beforehand using model-based multiobjective optimization techniques. The controller selects the priorities of objectives and this way balances reliability and performance. As exemplary application, an automatically actuated single plate dry clutch is introduced. 
The entire reliability control is setup and lifetime experiments are conducted. Results show that the variance of time to failure is reduced greatly, making the failure behavior more predictable. At the same time, the desired usable lifetime can be extended at the cost of system performance to allow for changed maintenance intervals. Together, these possibilities allow for greater system usage and better planning of maintenance.}}, author = {{Meyer, Tobias}}, keywords = {{dependability, reliability, behavior adaptation, self-optimization, multiobjective optimization, optimal control, automotive drivetrain, clutch system, reliability-adaptive system}}, publisher = {{Shaker}}, title = {{{Optimization-based reliability control of mechatronic systems}}}, year = {{2018}}, } @inproceedings{48839, abstract = {{We analyze the effects of including local search techniques into a multi-objective evolutionary algorithm for solving a bi-objective orienteering problem with a single vehicle while the two conflicting objectives are minimization of travel time and maximization of the number of visited customer locations. Experiments are based on a large set of specifically designed problem instances with different characteristics and it is shown that local search techniques focusing on one of the objectives only improve the performance of the evolutionary algorithm in terms of both objectives. 
The analysis also shows that local search techniques are capable of sending locally optimal solutions to foremost fronts of the multi-objective optimization process, and that these solutions then become the leading factors of the evolutionary process.}}, author = {{Bossek, Jakob and Grimme, Christian and Meisel, Stephan and Rudolph, Günter and Trautmann, Heike}}, booktitle = {{Proceedings of the Genetic and Evolutionary Computation Conference}}, isbn = {{978-1-4503-5618-3}}, keywords = {{combinatorial optimization, metaheuristics, multi-objective optimization, orienteering, transportation}}, pages = {{585–592}}, publisher = {{Association for Computing Machinery}}, title = {{{Local Search Effects in Bi-Objective Orienteering}}}, doi = {{10.1145/3205455.3205548}}, year = {{2018}}, } @inproceedings{48867, abstract = {{Assessing the performance of stochastic optimization algorithms in the field of multi-objective optimization is of utmost importance. Besides the visual comparison of the obtained approximation sets, more sophisticated methods have been proposed in the last decade, e. g., a variety of quantitative performance indicators or statistical tests. 
In this paper, we present tools implemented in the R package ecr, which assist in performing comprehensive and sound comparison and evaluation of multi-objective evolutionary algorithms following recommendations from the literature.}}, author = {{Bossek, Jakob}}, booktitle = {{Proceedings of the Genetic and Evolutionary Computation Conference Companion}}, isbn = {{978-1-4503-5764-7}}, keywords = {{evolutionary optimization, performance assessment, software-tools}}, pages = {{1350–1356}}, publisher = {{Association for Computing Machinery}}, title = {{{Performance Assessment of Multi-Objective Evolutionary Algorithms with the R Package ecr}}}, doi = {{10.1145/3205651.3208312}}, year = {{2018}}, } @inproceedings{48885, abstract = {{Performance comparisons of optimization algorithms are heavily influenced by the underlying indicator(s). In this paper we investigate commonly used performance indicators for single-objective stochastic solvers, such as the Penalized Average Runtime (e.g., PAR10) or the Expected Running Time (ERT), based on exemplary benchmark performances of state-of-the-art inexact TSP solvers. Thereby, we introduce a methodology for analyzing the effects of (usually heuristically set) indicator parametrizations - such as the penalty factor and the method used for aggregating across multiple runs - w.r.t. 
the robustness of the considered optimization algorithms.}}, author = {{Kerschke, Pascal and Bossek, Jakob and Trautmann, Heike}}, booktitle = {{Proceedings of the Genetic and Evolutionary Computation Conference Companion}}, isbn = {{978-1-4503-5764-7}}, keywords = {{algorithm selection, optimization, performance measures, transportation, travelling salesperson problem}}, pages = {{1737–1744}}, publisher = {{Association for Computing Machinery}}, title = {{{Parameterization of State-of-the-Art Performance Indicators: A Robustness Study Based on Inexact TSP Solvers}}}, doi = {{10.1145/3205651.3208233}}, year = {{2018}}, } @article{4419, abstract = {{Research on entrepreneurial learning highlights the importance of experience and prior knowledge to entrepreneurial success. However, a conundrum remains and we are still seeking answers as to why some novice entrepreneurs learn successfully from their experiences and succeed, while some experienced entrepreneurs fail with their ventures. In order to advance the discussion about the role of experience during entrepreneurial learning, our critical reflection aims to (1) highlight some of the shortcomings of experiential learning theory (ELT) and (2) illustrate how alternative theoretical perspectives have the potential to advance our conceptual understanding of entrepreneurial learning processes. 
We argue for an explanation of entrepreneurial learning as a dynamic and self-regulated process that relies on planning, monitoring, and self-reflection.}}, author = {{Fust, Alexander Paul and Jenert, Tobias and Winkler, Christoph}}, journal = {{Entrepreneurship Research Journal}}, keywords = {{entrepreneurial learning, experiential learning, self-regulated learning}}, number = {{2}}, pages = {{1--11}}, publisher = {{De Gruyter}}, title = {{{Experiential or Self-Regulated Learning: A Critical Reflection of Entrepreneurial Learning Processes}}}, volume = {{8}}, year = {{2018}}, } @article{9862, abstract = {{In order to improve the credibility of modern simulation tools, uncertainties of different kinds have to be considered. This work is focused on epistemic uncertainties in the framework of continuum mechanics, which are taken into account by fuzzy analysis. The underlying min-max optimization problem of the extension principle is approximated by α-discretization, resulting in a separation of minimum and maximum problems. To become more universal, so-called quantities of interest are employed, which allow a general formulation for the target problem of interest. In this way, the relation to parameter identification problems based on least-squares functions is highlighted. The solutions of the related optimization problems with simple constraints are obtained with a gradient-based scheme, which is derived from a sensitivity analysis for the target problem by means of a variational formulation. Two numerical examples for the fuzzy analysis of material parameters are concerned with a necking problem at large strain elastoplasticity and a perforated strip at large strain hyperelasticity to demonstrate the versatility of the proposed variational formulation. 
}}, author = {{Mahnken, Rolf}}, issn = {{2325-3444}}, journal = {{Mathematics and Mechanics of Complex Systems}}, keywords = {{fuzzy analysis, α-level optimization, quantities of interest, optimization with simple constraints, large strain elasticity, large strain elastoplasticity}}, number = {{3-4}}, title = {{{A variational formulation for fuzzy analysis in continuum mechanics}}}, volume = {{5}}, year = {{2017}}, } @article{9976, abstract = {{State-of-the-art mechatronic systems offer inherent intelligence that enables them to autonomously adapt their behavior to current environmental conditions and to their own system state. This autonomous behavior adaptation is made possible by software in combination with complex sensor and actuator systems and by sophisticated information processing, all of which make these systems increasingly complex. This increasing complexity makes the design process a challenging task and brings new complex possibilities for operation and maintenance. However, with the risk of increased system complexity also comes the chance to adapt system behavior based on current reliability, which in turn increases reliability. The development of such an adaption strategy requires appropriate methods to evaluate reliability based on currently selected system behavior. A common approach to implement such adaptivity is to base system behavior on different working points that are obtained using multiobjective optimization. During operation, selection among these allows a changed operating strategy. To allow for multiobjective optimization, an accurate system model including system reliability is required. This model is repeatedly evaluated by the optimization algorithm. At present, modeling of system reliability and synchronization of the models of behavior and reliability is a laborious manual task and thus very error-prone. 
Since system behavior is crucial for system reliability, an integrated model is introduced that integrates system behavior and system reliability. The proposed approach is used to formulate reliability-related objective functions for a clutch test rig that are used to compute feasible working points using multiobjective optimization.}}, author = {{Kaul, Thorben and Meyer, Tobias and Sextro, Walter}}, journal = {{Proceedings of the Institution of Mechanical Engineers, Part O: Journal of Risk and Reliability}}, keywords = {{Integrated model, reliability, system behavior, Bayesian network, multiobjective optimization}}, number = {{4}}, pages = {{390--399}}, title = {{{Formulation of reliability-related objective functions for design of intelligent mechatronic systems}}}, doi = {{10.1177/1748006X17709376}}, volume = {{231}}, year = {{2017}}, } @inproceedings{9978, abstract = {{Piezoelectric transducers are used in a wide range of applications. Reliability of these transducers is an important aspect in their application. Prognostics, which involve continuous monitoring of the health of technical systems and using this information to estimate the current health state and consequently predict the remaining useful lifetime (RUL), can be used to increase the reliability, safety, and availability of the transducers. This is achieved by utilizing the health state and RUL predictions to adaptively control the usage of the components or to schedule appropriate maintenance without interrupting operation. In this work, a prognostic approach utilizing self-sensing, where electric signals of a piezoelectric transducer are used as the condition monitoring data, is proposed. The approach involves training machine learning algorithms to model the degradation of the transducers through a health index and the use of the learned model to estimate the health index of similar transducers. The current health index is then used to estimate RUL of test components. 
The feasibility of the approach is demonstrated using piezoelectric bimorphs and the results show that the method is accurate in predicting the health index and RUL.}}, author = {{Kimotho, James Kuria and Sextro, Walter and Hemsel, Tobias}}, booktitle = {{IEEE Transactions on Reliability}}, keywords = {{Estimation of Remaining Useful Lifetime of Piezoelectric Transducers Based on Self-Sensing}}, pages = {{1 -- 10}}, title = {{{Estimation of Remaining Useful Lifetime of Piezoelectric Transducers Based on Self-Sensing}}}, doi = {{10.1109/TR.2017.2710260}}, year = {{2017}}, } @inproceedings{10676, author = {{Ho, Nam and Kaufmann, Paul and Platzner, Marco}}, booktitle = {{2017 International Conference on Field Programmable Technology (ICFPT)}}, keywords = {{Linux, cache storage, microprocessor chips, multiprocessing systems, LEON3-Linux based multicore processor, MiBench suite, block sizes, cache adaptation, evolvable caches, memory-to-cache-index mapping function, processor caches, reconfigurable cache mapping optimization, reconfigurable hardware technology, replacement strategies, standard Linux OS, time a complete hardware implementation, Hardware, Indexes, Linux, Measurement, Multicore processing, Optimization, Training}}, pages = {{215--218}}, title = {{{Evolvable caches: Optimization of reconfigurable cache mappings for a LEON3/Linux-based multi-core processor}}}, doi = {{10.1109/FPT.2017.8280144}}, year = {{2017}}, } @inproceedings{10780, author = {{Guettatfi, Zakarya and Hübner, Philipp and Platzner, Marco and Rinner, Bernhard}}, booktitle = {{12th International Symposium on Reconfigurable Communication-centric Systems-on-Chip (ReCoSoC)}}, keywords = {{embedded systems, image sensors, power aware computing, wireless sensor networks, Zynq-based VSN node prototype, computational self-awareness, design approach, platform levels, power consumption, visual sensor networks, visual sensor nodes, Cameras, Hardware, Middleware, Multicore processing, Operating systems, 
Runtime, Reconfigurable platforms, distributed embedded systems, performance-resource trade-off, self-awareness, visual sensor nodes}}, pages = {{1--8}}, title = {{{Computational self-awareness as design approach for visual sensor nodes}}}, doi = {{10.1109/ReCoSoC.2017.8016147}}, year = {{2017}}, } @article{29462, abstract = {{Time-variant age information of different parts of a system can be used for system-level performance improvement through high-level task scheduling, thus extending the life-time of the system. Progressive age information should provide the age state that the system is in, and the rate that it is being aged at. In this paper, we propose a structure that monitors certain paths of a circuit and detects its gradual age growth, and provides the aging rate and aging state of the circuit. The proposed monitors are placed on a selected set of nodes that represent a timing bottleneck of the system. These monitors sample expected data on these nodes, and compare them with the expected values. The timing of sampling changes as the circuit ages and its delay increases. The timing of sampling will provide a measure of aging advancement of a circuit. To assess the efficacy of the proposed method and compare it with other state-of-the-art aging monitors, we use them on selected nodes of the execution unit of different processors, as well as some circuits from ITC99 benchmarks. The results reveal that the precision of our proposed method is between 0.12 (ns) to 0.401 (ns). 
Its Area and power overhead are negligible and are about 2.13 and 0.69 percent respectively.}}, author = {{Sadeghi-Kohan, Somayeh and Kamal, Mehdi and Navabi, Zainalabedin}}, issn = {{2168-6750}}, journal = {{IEEE Transactions on Emerging Topics in Computing}}, keywords = {{Age advancement, age monitoring clock, aging rate, self-adjusting monitors}}, number = {{3}}, pages = {{627--641}}, publisher = {{Institute of Electrical and Electronics Engineers (IEEE)}}, title = {{{Self-Adjusting Monitor for Measuring Aging Rate and Advancement}}}, doi = {{10.1109/tetc.2017.2771441}}, volume = {{8}}, year = {{2017}}, } @inproceedings{48863, abstract = {{The novel R package ecr (version 2), short for Evolutionary Computation in R, provides a comprehensive collection of building blocks for constructing powerful evolutionary algorithms for single- and multi-objective continuous and combinatorial optimization problems. It allows to solve standard optimization tasks with few lines of code using a black-box approach. Moreover, rapid prototyping of non-standard ideas is possible via an explicit, white-box approach. This paper describes the design principles of the package and gives some introductory examples on how to use the package in practise.}}, author = {{Bossek, Jakob}}, booktitle = {{Proceedings of the Genetic and Evolutionary Computation Conference Companion}}, isbn = {{978-1-4503-4939-0}}, keywords = {{evolutionary optimization, software-tools}}, pages = {{1187–1193}}, publisher = {{Association for Computing Machinery}}, title = {{{Ecr 2.0: A Modular Framework for Evolutionary Computation in R}}}, doi = {{10.1145/3067695.3082470}}, year = {{2017}}, } @inproceedings{48857, abstract = {{While finding minimum-cost spanning trees (MST) in undirected graphs is solvable in polynomial time, the multi-criteria minimum spanning tree problem (mcMST) is NP-hard. 
Interestingly, the mcMST problem has not been in focus of evolutionary computation research for a long period of time, although, its relevance for real world problems is easy to see. The available and most notable approaches by Zhou and Gen as well as by Knowles and Corne concentrate on solution encoding and on fairly dated selection mechanisms. In this work, we revisit the mcMST and focus on the mutation operators as exploratory components of evolutionary algorithms neglected so far. We investigate optimal solution characteristics to discuss current mutation strategies, identify shortcomings of these operators, and propose a sub-tree based operator which offers what we term Pareto-beneficial behavior: ensuring convergence and diversity at the same time. The operator is empirically evaluated inside modern standard evolutionary meta-heuristics for multi-criteria optimization and compared to hitherto applied mutation operators in the context of mcMST.}}, author = {{Bossek, Jakob and Grimme, Christian}}, booktitle = {{2017 IEEE Symposium Series on Computational Intelligence (SSCI)}}, keywords = {{Convergence, Encoding, Euclidean distance, Evolutionary computation, Heating systems, Optimization, Standards}}, pages = {{1–8}}, title = {{{A Pareto-Beneficial Sub-Tree Mutation for the Multi-Criteria Minimum Spanning Tree Problem}}}, doi = {{10.1109/SSCI.2017.8285183}}, year = {{2017}}, } @inproceedings{9966, abstract = {{Usage of copper wire bonds allows to push power boundaries imposed by aluminum wire bonds. Copper allows higher electrical, thermal and mechanical loads than aluminum, which currently is the most commonly used material in heavy wire bonding. This is the main driving factor for increased usage of copper in high power applications such as wind turbines, locomotives or electric vehicles. At the same time, usage of copper also increases tool wear and reduces the range of parameter values for a stable process, making the process more challenging. 
To overcome these drawbacks, parameter adaptation at runtime using self-optimization is desired. A self-optimizing system is based on system objectives that evaluate and quantify system performance. System parameters can be changed at runtime such that pre-selected objective values are reached. For adaptation of bond process parameters, model-based self-optimization is employed. Since it is based on a model of the system, the bond process was modeled. In addition to static model parameters such as wire and substrate material properties and vibration characteristics of transducer and tool, variable model inputs are process parameters. Main simulation result is bonded area in the wire-substrate contact. This model is then used to find valid and optimal working points before operation. The working point is composed of normal force and ultrasonic voltage trajectories, which are usually determined experimentally. Instead, multiobjective optimization is used to compute trajectories that simultaneously optimize bond quality, process duration, tool wear and probability of tool-substrate contacts. The values of these objectives are computed using the process model. At runtime, selection among pre-determined optimal working points is sufficient to prioritize individual objectives. This way, the computationally expensive process of numerically solving a multiobjective optimal control problem and the demanding high speed bonding process are separated. To evaluate to what extent the pre-defined goals of self-optimization are met, an off-the-shelf heavy wire bonding machine was modified to allow for parameter adaptation and for transmitting of measurement data at runtime. This data is received by an external computer system and evaluated to select a new working point. Then, new process parameters are sent to the modified bonding machine for use for subsequent bonds. 
With these components, a full self-optimizing system has been implemented.}}, author = {{Meyer, Tobias and Unger, Andreas and Althoff, Simon and Sextro, Walter and Brökelmann, Michael and Hunstig, Matthias and Guth, Karsten}}, booktitle = {{IEEE 66th Electronic Components and Technology Conference}}, keywords = {{Self-optimization, adaptive system, bond process, copper wire}}, pages = {{622--628}}, title = {{{Reliable Manufacturing of Heavy Copper Wire Bonds Using Online Parameter Adaptation}}}, doi = {{10.1109/ECTC.2016.215}}, year = {{2016}}, } @inproceedings{48874, abstract = {{State of the Art inexact solvers of the NP-hard Traveling Salesperson Problem TSP are known to mostly yield high-quality solutions in reasonable computation times. With the purpose of understanding different levels of instance difficulties, instances for the current State of the Art heuristic TSP solvers LKH+restart and EAX+restart are presented which are evolved using a sophisticated evolutionary algorithm. More specifically, the performance differences of the respective solvers are maximized resulting in instances which are easier to solve for one solver and much more difficult for the other. 
Focusing on both optimization directions, instance features are identified which characterize both types of instances and increase the understanding of solver performance differences.}}, author = {{Bossek, Jakob and Trautmann, Heike}}, booktitle = {{Proceedings of the XV International Conference of the Italian Association for Artificial Intelligence on Advances in Artificial Intelligence - Volume 10037}}, isbn = {{978-3-319-49129-5}}, keywords = {{Combinatorial optimization, Instance hardness, Metaheuristics, Transportation, TSP}}, pages = {{3–12}}, publisher = {{Springer-Verlag}}, title = {{{Understanding Characteristics of Evolved Instances for State-of-the-Art Inexact TSP Solvers with Maximum Performance Difference}}}, doi = {{10.1007/978-3-319-49130-1_1}}, year = {{2016}}, } @article{1772, author = {{Torresen, Jim and Plessl, Christian and Yao, Xin}}, journal = {{IEEE Computer}}, keywords = {{self-awareness, self-expression}}, number = {{7}}, pages = {{18--20}}, publisher = {{IEEE Computer Society}}, title = {{{Self-Aware and Self-Expressive Systems – Guest Editor's Introduction}}}, doi = {{10.1109/MC.2015.205}}, volume = {{48}}, year = {{2015}}, } @article{5704, abstract = {{Advancements in information technology have changed the way customers experience a service encounter and their relationship with service providers. Especially technology-based self-service channels have found their way into the 21st century service economy. While research embraces these channels for their cost-efficiency, it has not examined whether a shift from personal to self-service affects customer–firm relationships. Drawing from the service-dominant logic and its central concept of value-in-context, we discuss customers’ value creation in self-service and personal service channels and examine the long-term impact of these channels on customer retention. 
Using longitudinal customer data, we investigate how the ratio of self-service versus personal service use influences customer defection over time. Our findings suggest that the ratio of self-service to personal service used affects customer defection in a U-shaped manner, with intermediate levels of both self-service and personal service use being associated with the lowest likelihood of defection. We also find that this effect mitigates over time. We conclude that firms should not shift customers toward self-service channels completely, especially not at the beginning of a relationship. Our study underlines the importance of understanding when and how self-service technologies create valuable customer experiences and stresses the notion of actively managing customers’ cocreation of value. }}, author = {{Scherer, Anne and Wünderlich, Nancy and Von Wangenheim, Florian}}, issn = {{0276-7783.}}, journal = {{MIS Quarterly}}, keywords = {{customer defection, customer retention, e-service, longitudinal, Self-service, value-in-context}}, number = {{1}}, pages = {{177--200}}, publisher = {{MIS RC}}, title = {{{The Value of Self-Service: Long-Term Effects of Technology-Based Self-Service Usage on Customer Retention.}}}, volume = {{39}}, year = {{2015}}, } @inproceedings{4464, abstract = {{A highly selective first study phase in many Swiss study programs leads to a rather competitive climate among students. However, the atmosphere at the university is an important factor for students' transition into Higher Education. An important question in this context is whether students' are equipped with different dispositions influencing how they cope with this transition. Other research has already shown that different groups of students can be identified regarding their student behavior. Yet, so far little is known about patterns of variables characterizing students, transitioning successfully. The paper takes advantage of a person-centered approach, i.e. 
the latent-class analysis, which makes it possible to identify groups of individuals, sharing common attributes. The research was conducted as a longitudinal study during their first year at a Swiss university. The return rate was about 67%, with 820 utilizable questionnaires at t1. Based on the analysis of students' anxiety, intrinsic motivation and self-efficacy, three distinct classes of students could be identified. The first class can be called the "highly motivated and self-confident" students. The second class is characterized by the same pattern, however, on a more intermediate level and the last class can be described as the "least motivated and most anxious" group of students. This study contributes to research and theory on students' transition into higher education and could be a first hint that students' experiences of this transition can vary substantially.}}, author = {{Brahm, Taiga and Wagner, Dietrich and Jenert, Tobias}}, keywords = {{Quantitative methods, Self-efficacy, Higher education, Motivation and Emotion}}, location = {{Zypern}}, title = {{{A person-centred approach to students' transition into Higher Education}}}, year = {{2015}}, } @article{9944, abstract = {{Eine Vielzahl von Prozessen in der Chemie und Verfahrenstechnik kann durch Ultraschall positiv beeinflusst werden. Oftmals ist ultraschallinduzierte Kavitation der Hauptwirkmechanismus für die positiven Effekte der Beschallung. Daher ist es notwendig die Kavitationsaktivität während des Prozesses zu quantifizieren um die Beschallung für den jeweiligen Prozess optimal gestalten und überwachen zu können. Eine Möglichkeit der prozessbegleitenden Kavitationsdetektion ist die Auswertung der akustischen Emissionen von oszillierenden und kollabierenden Kavitationsblasen mittels Drucksensoren in der Flüssigkeit. Raue Prozessrandbedingungen wie hohe Temperaturen oder aggressive Flüssigkeiten erschweren es jedoch geeignete Sensoren zu finden. 
Als Alternative wurde daher die Nutzbarkeit der Rückwirkung von Kavitationsereignissen auf das elektrische Eingangssignal des Ultraschallwandlers zur Quantifizierung von Kavitation untersucht. Die experimentelle Analyse hat ergeben, dass das Einsetzen und in einigen Fällen auch die Art der Kavitation auf Basis der Rückwirkung auf das Stromsignal des Ultraschallwandlers bestimmt werden kann. Die Stärke der Kavitation war hingegen nicht aus den Stromsignalen abzuleiten.}}, author = {{Bornmann, Peter and Hemsel, Tobias and Sextro, Walter and Memoli, Gianluca and Hodnett, Mark and Zeqiri, Bajram}}, journal = {{tm - Technisches Messen}}, keywords = {{Kavitationsdetektion, Self-Sensing, Sonochemie, Ultraschallwandler}}, number = {{2}}, pages = {{73--84}}, title = {{{Kavitationsdetektion mittels Self-Sensing-Ultraschallwandler}}}, doi = {{10.1515/teme-2015-0017}}, volume = {{82}}, year = {{2015}}, } @inproceedings{9949, abstract = {{Intelligent mechatronic systems offer the possibility to adapt system behavior to current dependability. This can be used to assure reliability by controlling system behavior to reach a pre-defined lifetime. By using such closed loop control, the margin of error of useful lifetime of an individual system is lowered. It is also possible to change the pre-defined lifetime during operation, by adapting system behavior to derate component usage. When planning maintenance actions, the remaining useful lifetime of each individual system has to be taken into account. Usually, stochastic properties of a fleet of systems are analyzed to create maintenance plans. Among these, the main factor is the probability of an individual system to last until maintenance. If condition-based maintenance is used, this is updated for each individual system using available information about its current state. By lowering the margin of error of useful lifetime, which directly corresponds to the time until maintenance, extended maintenance periods are made possible. 
Also using reliability-adaptive operation, a reversal of degradation driven maintenance planning is possible where a maintenance plan is setup not only according to system properties, but mainly to requirements imposed by maintenance personnel or infrastructure. Each system then adapts its behavior accordingly and fails according to the maintenance plan, making better use of maintenance personnel and system capabilities at the same time. In this contribution, the potential of maintenance plan driven system behavior adaptation is shown. A model including adaptation process and maintenance actions is simulated over full system lifetime to assess the advantages gained.}}, author = {{Meyer, Tobias and Kaul, Thorben and Sextro, Walter}}, booktitle = {{Proceedings of the 9th IFAC Symposium on Fault Detection, Supervision and Safety for Technical Processes}}, keywords = {{Adaptive systems, Reliability analysis, Availability, Adaptive control, Maintenance, Self-optimizing systems, Self-optimizing control, Stochastic Petri-nets}}, pages = {{940--945}}, title = {{{Advantages of reliability-adaptive system operation for maintenance planning}}}, doi = {{10.1016/j.ifacol.2015.09.647}}, year = {{2015}}, } @inproceedings{10673, author = {{Ho, Nam and Ahmed, Abdullah Fathi and Kaufmann, Paul and Platzner, Marco}}, booktitle = {{Proc. NASA/ESA Conf. 
Adaptive Hardware and Systems (AHS)}}, keywords = {{cache storage, field programmable gate arrays, multiprocessing systems, parallel architectures, reconfigurable architectures, FPGA, dynamic reconfiguration, evolvable cache mapping, many-core architecture, memory-to-cache address mapping function, microarchitectural optimization, multicore architecture, nature-inspired optimization, parallelization degrees, processor, reconfigurable cache mapping, reconfigurable computing, Field programmable gate arrays, Software, Tuning}}, pages = {{1--7}}, title = {{{Microarchitectural optimization by means of reconfigurable and evolvable cache mappings}}}, doi = {{10.1109/AHS.2015.7231178}}, year = {{2015}}, } @inproceedings{29973, abstract = {{Haushaltsgeräte aus der Klasse der "Weißen Ware" tragen mit etwa einem Drittel ($34,2\%$ \cite{BDEW2013}) zum privaten Energieverbrauch bei. Diese Veröffentlichung präsentiert eine Struktur und die dafür notwendige optimale Betriebsstrategie für Weiße Ware in einer Umgebung mit Strompreisen, die wegen der Volatilität der Regenerativen Energien stark fluktuieren. Das vorgeschlagene Konzept nutzt dafür ein dezentrales Energiemanagementsystem, das über drei Hierarchieebenen verteilt ist: die Geräteebene, die Haushaltsebene und die Ortsnetzebene. Auf der Geräteebene nutzt dieses Konzept zusätzlich Betriebsflexibilitäten der Haushaltsgeräte aus.}}, author = {{Stille, Karl Stephan Christian and Böcker, Joachim and Bettentrup, Ralf and Kaiser, Ingo}}, booktitle = {{ETG-Fachtagung "Von Smart Grids zu Smart Markets"}}, keywords = {{Energy management, hybrid energy storage system, self-optimization, multi-objective optimization, adaptive systems, pareto set, SFB614-D1, SFB614-D2, LEA-Publikation, Eigene}}, publisher = {{VDE}}, title = {{{Hierarchisches Optimierungskonzept für die Laststeuerung von Haushaltsgeräten}}}, year = {{2015}}, } @inproceedings{48838, abstract = {{The majority of algorithms can be controlled or adjusted by parameters. 
Their values can substantially affect the algorithms’ performance. Since the manual exploration of the parameter space is tedious – even for few parameters – several automatic procedures for parameter tuning have been proposed. Recent approaches also take into account some characteristic properties of the problem instances, frequently termed instance features. Our contribution is the proposal of a novel concept for feature-based algorithm parameter tuning, which applies an approximating surrogate model for learning the continuous feature-parameter mapping. To accomplish this, we learn a joint model of the algorithm performance based on both the algorithm parameters and the instance features. The required data is gathered using a recently proposed acquisition function for model refinement in surrogate-based optimization: the profile expected improvement. This function provides an avenue for maximizing the information required for the feature-parameter mapping, i.e., the mapping from instance features to the corresponding optimal algorithm parameters. The approach is validated by applying the tuner to exemplary evolutionary algorithms and problems, for which theoretically grounded or heuristically determined feature-parameter mappings are available.}}, author = {{Bossek, Jakob and Bischl, Bernd and Wagner, Tobias and Rudolph, Günter}}, booktitle = {{Proceedings of the Genetic and Evolutionary Computation Conference}}, isbn = {{978-1-4503-3472-3}}, keywords = {{evolutionary algorithms, model-based optimization, parameter tuning}}, pages = {{1319–1326}}, publisher = {{Association for Computing Machinery}}, title = {{{Learning Feature-Parameter Mappings for Parameter Tuning via the Profile Expected Improvement}}}, doi = {{10.1145/2739480.2754673}}, year = {{2015}}, } @inproceedings{48887, abstract = {{We evaluate the performance of a multi-objective evolutionary algorithm on a class of dynamic routing problems with a single vehicle. 
In particular we focus on relating algorithmic performance to the most prominent characteristics of problem instances. The routing problem considers two types of customers: mandatory customers must be visited whereas optional customers do not necessarily have to be visited. Moreover, mandatory customers are known prior to the start of the tour whereas optional customers request for service at later points in time with the vehicle already being on its way. The multi-objective optimization problem then results as maximizing the number of visited customers while simultaneously minimizing total travel time. As an a-posteriori evaluation tool, the evolutionary algorithm aims at approximating the related Pareto set for specifically designed benchmarking instances differing in terms of number of customers, geographical layout, fraction of mandatory customers, and request times of optional customers. Conceptional and experimental comparisons to online heuristic procedures are provided.}}, author = {{Meisel, Stephan and Grimme, Christian and Bossek, Jakob and Wölck, Martin and Rudolph, Günter and Trautmann, Heike}}, booktitle = {{Proceedings of the Genetic and Evolutionary Computation Conference }}, isbn = {{978-1-4503-3472-3}}, keywords = {{combinatorial optimization, metaheuristics, multi-objective optimization, online algorithms, transportation}}, pages = {{425–432}}, publisher = {{Association for Computing Machinery}}, title = {{{Evaluation of a Multi-Objective EA on Benchmark Instances for Dynamic Routing of a Vehicle}}}, doi = {{10.1145/2739480.2754705}}, year = {{2015}}, } @inproceedings{17661, author = {{King, Thomas C. and Liu, Qingzhi and Polevoy, Gleb and de Weerdt, Mathijs and Dignum, Virginia and van Riemsdijk, M. 
Birna and Warnier, Martijn}}, booktitle = {{Proceedings of the 2014 International Conference on Autonomous Agents and Multi-agent Systems}}, isbn = {{978-1-4503-2738-1}}, keywords = {{crowd-sensing, crowdsourcing, data aggregation, game theory, norms, reciprocation, self interested agents, simulation}}, pages = {{1651--1652}}, publisher = {{International Foundation for Autonomous Agents and Multiagent Systems}}, title = {{{Request Driven Social Sensing}}}, year = {{2014}}, } @inproceedings{9879, abstract = {{Application of prognostics and health management (PHM) in the field of Proton Exchange Membrane (PEM) fuel cells is emerging as an important tool in increasing the reliability and availability of these systems. Though a lot of work is currently being conducted to develop PHM systems for fuel cells, various challenges have been encountered including the self-healing effect after characterization as well as accelerated degradation due to dynamic loading, all which make RUL predictions a difficult task. In this study, a prognostic approach based on adaptive particle filter algorithm is proposed. The novelty of the proposed method lies in the introduction of a self-healing factor after each characterization and the adaption of the degradation model parameters to fit to the changing degradation trend. An ensemble of five different state models based on weighted mean is then developed. The results show that the method is effective in estimating the remaining useful life of PEM fuel cells, with majority of the predictions falling within 5\% error. 
The method was employed in the IEEE 2014 PHM Data Challenge and led to our team emerging the winner of the RUL category of the challenge.}}, author = {{Kimotho, James Kuria and Meyer, Tobias and Sextro, Walter}}, booktitle = {{Prognostics and Health Management (PHM), 2014 IEEE Conference on}}, keywords = {{ageing, particle filtering (numerical methods), proton exchange membrane fuel cells, remaining life assessment, PEM fuel cell prognostics, PHM, RUL predictions, accelerated degradation, adaptive particle filter algorithm, dynamic loading, model parameter adaptation, prognostics and health management, proton exchange membrane fuel cells, remaining useful life estimation, self-healing effect, Adaptation models, Data models, Degradation, Estimation, Fuel cells, Mathematical model, Prognostics and health management}}, pages = {{1--6}}, title = {{{PEM fuel cell prognostics using particle filter with model parameter adaptation}}}, doi = {{10.1109/ICPHM.2014.7036406}}, year = {{2014}}, } @inproceedings{9884, abstract = {{So-called reliability adaptive systems are able to adapt their system behavior based on the current reliability of the system. This allows them to react to changed operating conditions or faults within the system that change the degradation behavior. To implement such reliability adaptation, self-optimization can be used. A self-optimizing system pursues objectives, of which the priorities can be changed at runtime, in turn changing the system behavior. When including system reliability as an objective of the system, it becomes possible to change the system based on the current reliability as well. This capability can be used to control the reliability of the system throughout its operation period in order to achieve a pre-defined or user-selectable system lifetime. This way, optimal planning of maintenance intervals is possible while also using the system capabilities to their full extent. 
Our proposed control system makes it possible to react to changed degradation behavior by selecting objectives of the self-optimizing system and in turn changing the operating parameters in a closed loop. A two-stage controller is designed which is used to select the currently required priorities of the objectives in order to fulfill the desired usable lifetime. Investigations using a model of an automotive clutch system serve to demonstrate the feasibility of our controller. It is shown that the desired lifetime can be achieved reliably.}}, author = {{Meyer, Tobias and Sextro, Walter}}, booktitle = {{Proceedings of the Second European Conference of the Prognostics and Health Management Society 2014}}, keywords = {{self-optimization reliability adaptive}}, title = {{{Closed-loop Control System for the Reliability of Intelligent Mechatronic Systems}}}, volume = {{5}}, year = {{2014}}, } @article{9885, abstract = {{Intelligent mechatronic systems, such as self-optimizing systems, allow an adaptation of the system behavior at runtime based on the current situation. To do so, they generally select among several pre-defined working points. A common method to determine working points for a mechatronic system is to use model-based multiobjective optimization. It allows finding compromises among conflicting objectives, called objective functions, by adapting parameters. To evaluate the system behavior for different parameter sets, a model of the system behavior is included in the objective functions and is evaluated during each function call. Intelligent mechatronic systems also have the ability to adapt their behavior based on their current reliability, thus increasing their availability, or on changed safety requirements; all of which are summed up by the common term dependability. To allow this adaptation, dependability can be considered in multiobjective optimization by including dependability-related objective functions. 
However, whereas performance-related objective functions are easily found, formulation of dependability-related objective functions is highly system-specific and not intuitive, making it complex and error-prone. Since each mechatronic system is different, individual failure modes have to be taken into account, which need to be found using common methods such as Failure-Modes and Effects Analysis or Fault Tree Analysis. Using component degradation models, which again are specific to the system at hand, the main loading factors can be determined. By including these in the model of the system behavior, the relation between working point and dependability can be formulated as an objective function. In our work, this approach is presented in more detail. It is exemplified using an actively actuated single plate dry clutch system. Results show that this approach is suitable for formulating dependability-related objective functions and that these can be used to extend system lifetime by adapting system behavior.}}, author = {{Meyer, Tobias and Sondermann-Wölke, Christoph and Sextro, Walter}}, journal = {{Conference Proceedings of the 2nd International Conference on System-Integrated Intelligence}}, keywords = {{Self-optimization, multiobjective optimization, objective function, dependability, intelligent system, behavior adaptation}}, pages = {{46--53}}, title = {{{Method to Identify Dependability Objectives in Multiobjective Optimization Problem}}}, doi = {{10.1016/j.protcy.2014.09.033}}, volume = {{15}}, year = {{2014}}, } @inproceedings{10677, author = {{Ho, Nam and Kaufmann, Paul and Platzner, Marco}}, booktitle = {{2014 {IEEE} Intl. Conf. 
on Evolvable Systems (ICES)}}, keywords = {{Linux, cache storage, embedded systems, granular computing, multiprocessing systems, reconfigurable architectures, Leon3 SPARe processor, custom logic events, evolvable-self-adaptable processor cache, fine granular profiling, integer unit events, measurement infrastructure, microarchitectural events, multicore embedded system, perf_event standard Linux performance measurement interface, processor properties, run-time reconfigurable memory-to-cache address mapping engine, run-time reconfigurable multicore infrastructure, split-level caching, Field programmable gate arrays, Frequency locked loops, Irrigation, Phasor measurement units, Registers, Weaving}}, pages = {{31--37}}, title = {{{Towards self-adaptive caches: A run-time reconfigurable multi-core infrastructure}}}, doi = {{10.1109/ICES.2014.7008719}}, year = {{2014}}, } @inproceedings{13324, abstract = {{The new technological enhancements and the accessibility to varieties of online applications, enable users to collect personal data and perform self-evaluation through test, comparison and experimentation. The sparked interest in numbers and numbers as self-representative visualisations is prominent in social networking sites, which are the empirical setting for the present study. This paper sets out to establish a multi-theoretical framework which enables the investigation of emerging phenomena of the role of numbers in social networking sites. The proposed framework rests on three theoretical pillars: self-determination theory, heuristic decision making and behavioural economics. 
A discussion departs from these convictions to investigate user reactions and behaviour when faced with numerical representations in the SNS.}}, author = {{Sjöklint, Mimmi and Constantiou, Ioanna and Trier, Matthias}}, booktitle = {{ECIS 2013 Proceedings}}, isbn = {{9783834924421}}, keywords = {{User Behaviour, Social Networking Sites, Numerical Representations, Multi-Theoretical Framework, Quantified Self, Pointification}}, publisher = {{Association for Information Systems. AIS Electronic Library (AISeL)}}, title = {{{Numerical Representations and User Behaviour in Social Networking Sites: Towards a Multi- Theoretical Research Framework}}}, year = {{2013}}, } @inproceedings{37109, abstract = {{This study examines the effect of audit on private firms’ cost of debt. We use a sample of 1,949 small private firms operating in the period 2006-2010 with optional financial statement audit. High quality data allows us to construct a more precise interest rate measure than existing studies employ. After controlling for obvious sources of demand for voluntary audits (ownership complexity, subsidiary status, bank relations), we find a robust central result that voluntary audits increase rather than decrease the cost of debt financing, contrary to several existing studies. This finding indicates that voluntary audits are generally treated as “adopting a label” and penalised by creditors, regardless of the perceived auditor quality as a result of the lemon problem in the audit market. Even Big-4 audits increase the cost of debt, likely as a result due to the lemon problem in the audit market, although the increase is smaller than for non-Big-4 audits. The results are sensitive to the estimation method used (OLS, Heckman’s two-step, PSM) and (sub-)sample selection. We show that disregarding the underlying assumptions of these estimation methods may lead to incorrect inferences. 
Additional analyses show that audited firms’ reported earnings are less informative about future operating performance than earnings of their unaudited counterparts. Our results also indicate that results are sensitive to cost of debt definition and this might have affected the results reported in the existing literature.}}, author = {{Kosi, Urska and Koren, Jerney and Valentincic, Aljosa}}, keywords = {{private firms, voluntary audit, cost of debt, self-selection bias, lemon problem}}, location = {{Paris, France}}, title = {{{Does Financial Statement Audit Reduce the Cost of Debt of Private Firms?}}}, year = {{2013}}, } @inproceedings{11832, abstract = {{In this paper we propose an approach to retrieve the absolute geometry of an acoustic sensor network, consisting of spatially distributed microphone arrays, from reverberant speech input. The calibration relies on direction of arrival measurements of the individual arrays. The proposed calibration algorithm is derived from a maximum-likelihood approach employing circular statistics. Since a sensor node consists of a microphone array with known intra-array geometry, we are able to obtain an absolute geometry estimate, including angles and distances. 
Simulation results demonstrate the effectiveness of the approach.}}, author = {{Jacob, Florian and Schmalenstroeer, Joerg and Haeb-Umbach, Reinhold}}, booktitle = {{38th International Conference on Acoustics, Speech, and Signal Processing (ICASSP 2013)}}, issn = {{1520-6149}}, keywords = {{Geometry calibration, microphone arrays, position self-calibration}}, pages = {{116--120}}, title = {{{DoA-Based Microphone Array Position Self-Calibration Using Circular Statistic}}}, doi = {{10.1109/ICASSP.2013.6637620}}, year = {{2013}}, } @inproceedings{22737, author = {{Becker, Matthias and Luckey, Markus and Becker, Steffen}}, booktitle = {{{Proceedings of the 8th International ACM SIGSOFT Conference on Quality of Software Architectures (QoSA)}}}, isbn = {{978-1-4503-1346-9}}, keywords = {{model-driven performance engineering, self-*, Self-adaptation, software performance}}, pages = {{117--122}}, publisher = {{ACM}}, title = {{{Model-driven Performance Engineering of Self-adaptive Systems: A Survey}}}, doi = {{10.1145/2304696.2304716}}, year = {{2012}}, } @article{5718, abstract = {{The role of information and communication technology for economic growth has been emphasized repeatedly. Technological breakthroughs have generated new forms of services, such as self-services or remote services. Although these encounters are qualitatively different from traditional service provision, prior service management literature thus far had paid little attention to theory development and the systematization of technology-based service encounters. To fill this research gap, the present study outlines how new types of technology-based services fit into existing service typologies and provides an extension of existing frameworks to capture their unique characteristics. 
These insights in turn offer managerial implications and highlight open research questions.}}, author = {{Schumann, Jan H and Wünderlich, Nancy and Wangenheim, Florian}}, journal = {{Technovation}}, keywords = {{Services, Remote services, Self-services, Technology mediation}}, number = {{2}}, pages = {{133--143}}, publisher = {{Elsevier}}, title = {{{Technology Mediation in Service Delivery: A New Typology and an Agenda for Managers and Academics.}}}, volume = {{32}}, year = {{2012}}, } @inproceedings{9783, abstract = {{To optimize the ultrasound irradiation for cavitation based ultrasound applications like sonochemistry or ultrasound cleaning, the correlation between cavitation intensity and the resulting effect on the process is of interest. Furthermore, changing conditions like temperature and pressure result in varying acoustic properties of the liquid. That might necessitate an adaption of the ultrasound irradiation. To detect such changes during operation, process monitoring is desired. Labor intensive processes, that might be carried out for several hours, also require process monitoring to increase their reliability by detection of changes or malfunctions during operation. In some applications cavitation detection and monitoring can be achieved by the application of sensors in the sound field. Though the application of sensors is possible, this necessitates modifications on the system and the sensor might disturb the sound field. In other applications harsh, process conditions prohibit the application of sensors in the sound field. Therefore alternative techniques for cavitation detection and monitoring are desired. The applicability of an external microphone and a self-sensing ultrasound transducer for cavitation detection were experimentally investigated. 
Both methods were found to be suitable and easily applicable.}}, author = {{Bornmann, Peter and Hemsel, Tobias and Sextro, Walter and Maeda, Takafumi and Morita, Takeshi}}, booktitle = {{Ultrasonics Symposium (IUS), 2012 IEEE International}}, issn = {{1948-5719}}, keywords = {{cavitation, chemical reactors, microphones, process monitoring, reliability, ultrasonic applications, ultrasonic waves, acoustic properties, cavitation based ultrasound applications, cavitation intensity, change detection reliability, external microphone, malfunction detection reliability, nonperturbing cavitation detection, nonperturbing cavitation monitoring, process monitoring, self-sensing ultrasound transducer, sonochemical reactors, sonochemistry, ultrasound cleaning, ultrasound irradiation, Acoustics, Liquids, Monitoring, Sensors, Sonar equipment, Transducers, Ultrasonic imaging}}, pages = {{1141--1144}}, title = {{{Non-perturbing cavitation detection / monitoring in sonochemical reactors}}}, doi = {{10.1109/ULTSYM.2012.0284}}, year = {{2012}}, } @article{9786, abstract = {{Self-optimizing mechatronic systems are a new class of technical systems. On the one hand, new challenges regarding dependability arise from their additional complexity and adaptivity. On the other hand, their abilities enable new concepts and methods to improve the dependability of mechatronic systems. This paper introduces a multi-level dependability concept for self-optimizing mechatronic systems and shows how probabilistic planning can be used to improve the availability and reliability of systems in the operating phase. 
The general idea to improve the availability of autonomous systems by applying probabilistic planning methods to avoid energy shortages is exemplified on the example of an innovative railway vehicle.}}, author = {{Klöpper, Benjamin and Sondermann-Wölke, Christoph and Romaus, Christoph}}, journal = {{Journal of Robotics and Mechatronics}}, keywords = {{self-optimizing systems, dependability, probabilistic planning, energy management}}, number = {{1}}, pages = {{5--15}}, title = {{{Probabilistic Planning for Predictive Condition Monitoring and Adaptation Within the Self-Optimizing Energy Management of an Autonomous Railway Vehicle}}}, volume = {{24}}, year = {{2012}}, } @inproceedings{11833, abstract = {{In this paper we propose an approach to retrieve the geometry of an acoustic sensor network consisting of spatially distributed microphone arrays from unconstrained speech input. The calibration relies on Direction of Arrival (DoA) measurements which do not require a clock synchronization among the sensor nodes. The calibration problem is formulated as a cost function optimization task, which minimizes the squared differences between measured and predicted observations and additionally avoids the existence of minima that correspond to mirrored versions of the actual sensor orientations. Further, outlier measurements caused by reverberation are mitigated by a Random Sample Consensus (RANSAC) approach. 
The experimental results show a mean positioning error of at most 25 cm even in highly reverberant environments.}}, author = {{Jacob, Florian and Schmalenstroeer, Joerg and Haeb-Umbach, Reinhold}}, booktitle = {{International Workshop on Acoustic Signal Enhancement (IWAENC 2012)}}, keywords = {{Unsupervised, geometry calibration, microphone arrays, position self-calibration}}, title = {{{Microphone Array Position Self-Calibration from Reverberant Speech Input}}}, year = {{2012}}, } @inproceedings{46397, abstract = {{In multiobjective optimization, set-based performance indicators are commonly used to assess the quality of a Pareto front approximation. Based on the scalarization obtained by these indicators, a performance comparison of multiobjective optimization algorithms becomes possible. The R2 and the Hypervolume (HV) indicator represent two recommended approaches which have shown a correlated behavior in recent empirical studies. Whereas the HV indicator has been comprehensively analyzed in the last years, almost no studies on the R2 indicator exist. In this paper, we thus perform a comprehensive investigation of the properties of the R2 indicator in a theoretical and empirical way. The influence of the number and distribution of the weight vectors on the optimal distribution of μ solutions is analyzed. 
Based on a comparative analysis, specific characteristics and differences of the R2 and HV indicator are presented.}}, author = {{Brockhoff, Dimo and Wagner, Tobias and Trautmann, Heike}}, booktitle = {{Proceedings of the 14th Annual Conference on Genetic and Evolutionary Computation}}, isbn = {{9781450311779}}, keywords = {{hypervolume indicator, multiobjective optimization, performance assessment, r2 indicator}}, pages = {{465–472}}, publisher = {{Association for Computing Machinery}}, title = {{{On the Properties of the R2 Indicator}}}, doi = {{10.1145/2330163.2330230}}, year = {{2012}}, } @inproceedings{46396, abstract = {{The steady supply of new optimization methods makes the algorithm selection problem (ASP) an increasingly pressing and challenging task, specially for real-world black-box optimization problems. The introduced approach considers the ASP as a cost-sensitive classification task which is based on Exploratory Landscape Analysis. Low-level features gathered by systematic sampling of the function on the feasible set are used to predict a well-performing algorithm out of a given portfolio. Example-specific label costs are defined by the expected runtime of each candidate algorithm. We use one-sided support vector regression to solve this learning problem. 
The approach is illustrated by means of the optimization problems and algorithms of the BBOB’09/10 workshop.}}, author = {{Bischl, Bernd and Mersmann, Olaf and Trautmann, Heike and Preuß, Mike}}, booktitle = {{Proceedings of the 14th Annual Conference on Genetic and Evolutionary Computation}}, isbn = {{9781450311779}}, keywords = {{machine learning, exploratory landscape analysis, fitness landscape, benchmarking, evolutionary optimization, bbob test set, algorithm selection}}, pages = {{313–320}}, publisher = {{Association for Computing Machinery}}, title = {{{Algorithm Selection Based on Exploratory Landscape Analysis and Cost-Sensitive Learning}}}, doi = {{10.1145/2330163.2330209}}, year = {{2012}}, } @inproceedings{46401, abstract = {{Exploratory Landscape Analysis subsumes a number of techniques employed to obtain knowledge about the properties of an unknown optimization problem, especially insofar as these properties are important for the performance of optimization algorithms. Where in a first attempt, one could rely on high-level features designed by experts, we approach the problem from a different angle here, namely by using relatively cheap low-level computer generated features. 
Interestingly, very few features are needed to separate the BBOB problem groups and also for relating a problem to high-level, expert designed features, paving the way for automatic algorithm selection.}}, author = {{Mersmann, Olaf and Bischl, Bernd and Trautmann, Heike and Preuss, Mike and Weihs, Claus and Rudolph, Günter}}, booktitle = {{Proceedings of the 13th Annual Conference on Genetic and Evolutionary Computation}}, isbn = {{9781450305570}}, keywords = {{exploratory landscape analysis, evolutionary optimization, fitness landscape, benchmarking, BBOB test set}}, pages = {{829–836}}, publisher = {{Association for Computing Machinery}}, title = {{{Exploratory Landscape Analysis}}}, doi = {{10.1145/2001576.2001690}}, year = {{2011}}, } @inproceedings{23858, abstract = {{A large proportion of plastics today is compounded, which means the process from refining a raw material to the processable material. For this process compounding extruders are used which mostly involve tightly intermeshing, co-rotating twin screw extruders. These extruders consist of two closely spaced screws which rotate in the same direction and convey the raw material to the screw tip. These screws are surrounded by several barrel modules which heat or cool the material. As the whole design of the machine is modularly arranged the process behavior of a twin screw extruder depends for the main part on the arrangement of the screw and the barrel elements. Until today this arrangement and process optimization is conducted by experienced engineers and with the help of trial-and-error methods. Furthermore, theoretical models are used with which the behavior of the extruder is estimated. As these models are mostly very complex they are only made available with the realization in different software projects. One of the tools is called SIGMA. Within this paper SIGMA is introduced as a software to optimize a twin screw extruder. 
SIGMA supports the engineer already in the early stages of the extruder arrangement.}}, author = {{Kretzschmar, Nils and Schöppner, Volker}}, booktitle = {{Proceedings of the 2010 Summer Computer Simulation Conference}}, keywords = {{process optimization, polymer engineering, compounding, twin screw extruder, simulation}}, pages = {{133–140}}, publisher = {{Society for Computer Simulation International}}, title = {{{Simulating Tightly Intermeshing, Co-Rotating Twin Screw Extruders with SIGMA}}}, year = {{2010}}, }