@inproceedings{22309,
  abstract     = {{Approximate computing (AC) has acquired significant maturity in recent years as a promising approach to obtain energy and area-efficient hardware. Automated approximate accelerator synthesis involves a great deal of complexity on the size of design space which exponentially grows with the number of possible approximations. Design space exploration of approximate accelerator synthesis is usually targeted via heuristic-based search methods. The majority of existing frameworks prune a large part of the design space using a greedy-based approach to keep the problem tractable. Therefore, they result in inferior solutions since many potential solutions are neglected in the pruning process without the possibility of backtracking of removed approximate instances. In this paper, we address the aforementioned issue by adopting Monte Carlo Tree Search (MCTS), as an efficient stochastic learning-based search algorithm, in the context of automated synthesis of approximate accelerators. This enables the synthesis frameworks to deeply subsamples the design space of approximate accelerator synthesis toward most promising approximate instances based on the required performance goals, i.e., power consumption, area, or/and delay. We investigated the challenges of providing an efficient open-source framework that benefits analytical and search-based approximation techniques simultaneously to both speed up the synthesis runtime and improve the quality of obtained results. Besides, we studied the utilization of machine learning algorithms to improve the performance of several critical steps, i.e., accelerator quality testing, in the synthesis framework. The proposed framework can help the community to rapidly generate efficient approximate accelerators in a reasonable runtime.}},
  author       = {{Awais, Muhammad and Platzner, Marco}},
  booktitle    = {{Proceedings of IEEE Computer Society Annual Symposium on VLSI}},
  keywords     = {{Approximate computing, Design space exploration, Accelerator synthesis}},
  location     = {{Tampa, Florida, USA (Virtual)}},
  pages        = {{384--389}},
  publisher    = {{IEEE}},
  title        = {{{MCTS-Based Synthesis Towards Efficient Approximate Accelerators}}},
  year         = {{2021}},
}

@inproceedings{21727,
  abstract     = {{Platform-based business models underlie the success of many of today’s largest, fastest-growing, and most disruptive companies. Despite the success of prominent examples, such as Uber and Airbnb, creating a profitable platform ecosystem presents a key challenge for many companies across all industries. Although research provides knowledge about platforms’ different value drivers (e.g., network effects), companies that seek to transform their current business model into a platform-based one lack an artifact to reduce knowledge boundaries, collaborate effectively, and cope with the complexities and dynamics of platform ecosystems. We address this challenge by developing two artifacts and combining research from variability modeling, business model dependencies, and system dynamics. This paper presents a design science research approach to develop the platform ecosystem modeling language and the platform ecosystem development tool that support researcher and practitioner by visualizing and simulating platform ecosystems. }},
  author       = {{Vorbohle, Christian and Gottschalk, Sebastian}},
  booktitle    = {{Proceedings of the 29th European Conference on Information Systems (ECIS)}},
  keywords     = {{Platform Ecosystems, Platform Ecosystem Modeling Language, Platform Ecosystem Development Tool, Business Models, Design Science}},
  location     = {{Virtual Conference/Workshop}},
  publisher    = {{AIS}},
  title        = {{{Towards Visualizing and Simulating Business Models in Dynamic Platform Ecosystems}}},
  year         = {{2021}},
}

@phdthesis{28683,
  abstract     = {{In den letzten Jahren haben sich Software-Ökosysteme als neue, erfolgreiche Geschäftsform etabliert. Unternehmen agieren hierbei als Anbieter von Software-Plattformen, auf denen Drittanbieter Softwarelösungen für den Markt anbieten können.  Etablierte Beispiele sind hierbei sogenannte App-Stores, die z.B. von Google oder Apple angeboten werden.

Beim Aufbau von Software-Ökosystemen müssen vom Plattformanbieter viele architektonische Entwurfsentscheidungen getroffen werden. Bisher gibt es keine Architekturrichtlinien und -werkzeuge, die den Entwurf einer Ökosystemarchitektur unterstützen. Dadurch fehlt hier systematisches, wiederverwendbares Wissen. Plattformanbieter müssen auf ad-hoc Entscheidungen zurückgreifen. Dies kann dann zu Problemen im Betrieb der Software-Plattformen führen, zu erhöhten Ausfallrisiken und Mehrkosten.

Der Mangel an Architekturwissen manifestiert sich konkret in zwei Gruppen von Herausforderungen: Erstens fehlt eine Wissensbasis zu Architekturalternativen und zweitens fehlt es an methodischem Wissen zu Entwicklung und Betrieb von Software-Ökosystemen. Eine Architekturwissensbasis würde Orientierungshilfen zu den Bestandteilen von Software-Ökosystemen und deren Abhängigkeiten geben, während methodisches Wissen die Erstellung dieser Systeme erleichtern würde.

In der Dissertation werden diese Herausforderungen durch die Entwicklung des Frameworks SecoArc für die Modellierung von Software-Ökosystemen angegangen. Der Beitrag der Dissertation ist zweifach: 
1.	Das SecoArc-Framework umfasst eine Architekturwissensbasis, die wiederverwendbare Architekturentwurfsentscheidungen
von Software-Ökosystemen enthält. Die Wissensbasis wurde entwickelt, indem das Architekturwissen bestehender Ökosysteme sowie aus existierender Fachliteratur ermittelt wurde und in einer Produktlinie für Software-Ökosysteme konsolidiert wurde. Die Produktlinie umfasst architektonische Gemeinsamkeiten und Variabilitäten von Software-Ökosystemen. 
2.	Das SecoArc-Framework liefert methodisches Wissen, um die Ökosystemarchitektur in Modellen zu entwerfen und zu analysieren. Dieses Wissen wurde entwickelt, indem drei Architekturmuster identifiziert wurden. Jedes Muster erfasst unterschiedliche Beziehungen zwischen architektonischen Entwurfsentscheidungen zu den Qualitätsmerkmalen einer Ökosystemgesundheit und der Erreichung von Geschäftszielen. 

Die Architekturmuster und die Produktlinie wurden dazu genutzt, ein Modellierungsframework zu entwickeln und in Form eines Prototypen umzusetzen, welches einen Entwurfsprozess, eine Modellierungssprache und eine Architekturanalysetechnik umfasst. Es erleichtert das Modellieren, Analysieren und Vergleichen von Ökosystemarchitekturen.

Die Ergebnisse der Dissertation wurden im Rahmen von zwei Studien evaluiert. In der ersten Validierungsstudie wurden das Framework sowie der Prototyp verwendet, um zwei alternative Ökosystemarchitekturen zu entwerfen und zu analysieren. In der zweiten Studie wurde eine Analyse von existierenden Ökosystemen basierend auf den architektonischen Variabilitäten des Frameworks durchgeführt.}},
  author       = {{Schwichtenberg, Bahar}},
  keywords     = {{Enterprise Architecture, Architectural Design Decisions, Open Platforms}},
  title        = {{{Modeling and Analyzing Software Ecosystems}}},
  doi          = {{10.17619/UNIPB/1-1270}},
  year         = {{2021}},
}

@inproceedings{29899,
  abstract     = {{LLC resonant converters are typically unsuitable to be applied for wide voltage-transfer ratio applications. With a full-bridge inverter, however, they can be operated in a variety of different modulations. Most notably, by permanently turning on one MOSFET and turning off the other MOSFET of the same bridge leg, the LLC can be operated in half-bridge configuration reducing the gain by a factor of two. The resonant capacitor is hereby charged to an average voltage of half the input voltage. In this modulation, however, the switch that is permanently turned on is stressed by the complete resonant current while exhibiting no switching losses. This paper proves that the frequency-doubler modulation can better balance the losses among all MOSFETs and should be the preferred mode of operation favored over the conventional half-bridge modulation. This paper analyzes the beneficial loss distribution, proposes an on-the-fly morphing modulation and discusses potential operating strategies to further reduce the junction temperature. Furthermore, it is shown that this modulation can also be altered to achieve the asymmetrical LLC operation. Experimental measurement results show that the modulation results in a substantial decrease of the maximum MOSFET temperature and shows that the converter can be smoothly transitioned during operation from full-bridge modulation to the frequency-doubler half-bridge operation and back.}},
  author       = {{Rehlaender, Philipp and Unruh, Roland and Hankeln, Lars and Schafmeister, Frank and Böcker, Joachim}},
  booktitle    = {{23rd European Conference on Power Electronics and Applications (EPE'21 ECCE Europe)}},
  isbn         = {{978-9-0758-1537-5}},
  keywords     = {{Resonant converter, High frequency power converter, Switched-mode power supply, Converter control, Control methods for electrical systems}},
  location     = {{Ghent, Belgium}},
  publisher    = {{IEEE}},
  title        = {{{Frequency-Doubler Modulation for Reduced Junction Temperatures for LLC Resonant Converters Operated in Half-Bridge Configuration}}},
  doi          = {{10.23919/EPE21ECCEEurope50061.2021.9570674}},
  year         = {{2021}},
}

@inbook{22930,
  abstract     = {{Self-piercing riveting is an established technique for joining multi-material structures in car body manufacturing. Rivets for self-piercing riveting differ in their geometry, the material used, the condition of the material and their surface condition. To shorten the manufacturing process by omitting the heat treatment and the coating process, the authors have elaborated a concept for the use of stainless steel with high strain hardening as a rivet material. The focus of the present investigation is on the evaluation of the influences of the rivet’s geometry and material on its deformation behaviour. Conventional rivets of types P and HD2, a rivet with an improved geometry made of treatable steel 38B2, and rivets made of the stainless steels 1.3815 and 1.4541 are examined. The analysis is conducted by means of multi-step joining tests for two material combinations comprising high-strength steel HCT70X and aluminium EN AW-5083. The joints are cut to provide a cross-section and the deformation behaviour of the different rivets is analysed on the basis of the measured changes in geometry and hardness. In parallel, an examination of the force-stroke curves provides further insights. It can be demonstrated that, besides the geometry, the material strength, in particular, has a significant influence on the deformation behaviour of the rivet. The strength of steel 1.4541 is seen to be too low for the joining task, while the strength of steel 1.3815 is sufficient, and hence the investigation confirms the capability of rivets made of 1.3815 for joining even challenging material combinations.}},
  author       = {{Uhe, Benedikt and Kuball, Clara-Maria and Merklein, Marion and Meschut, Gerson}},
  booktitle    = {{Forming the Future - Proceedings of the 13th International Conference on the Technology of Plasticity. The Minerals, Metals & Materials Series.}},
  editor       = {{Daehn, Glenn and Cao, Jian and Kinsey, Brad and Tekkaya, Erman and Vivek, Anupam and Yoshida, Yoshinori}},
  keywords     = {{Self-piercing riveting, Lightweight design, Deformation behaviour, Stainless steel, High nitrogen steel}},
  pages        = {{1495--1506}},
  publisher    = {{Springer}},
  title        = {{{Self-Piercing Riveting Using Rivets Made of Stainless Steel with High Strain Hardening}}},
  doi          = {{10.1007/978-3-030-75381-8_124}},
  year         = {{2021}},
}

@inbook{21542,
  abstract     = {{Using near-field (NF) scan data to predict the far-field (FF) behaviour of radiating electronic systems represents a novel method to accompany the whole RF design process. This approach involves so-called Huygens' box as an efficient radiation model inside an electromagnetic (EM) simulation tool and then transforms the scanned NF measured data into the FF. For this, the basic idea of the Huygens'box principle and the NF-to-FF transformation are briefly presented. The NF is measured on the Huygens' box around a device under test using anNF scanner, recording the magnitude and phase of the site-related magnetic and electric components. A comparison between a fullwave simulation and the measurement results shows a good similarity in both the NF and the simulated and transformed FF.Thus, this method is applicable to predict the FF behaviour of any electronic system by measuring the NF. With this knowledge, the RF design can be improved due to allowing a significant reduction of EM compatibility failure at the end of the development flow. In addition, the very efficient FF radiation model can be used for detailed investigations in various environments and the impact of such an equivalent radiation source on other electronic systems can be assessed.}},
  author       = {{Schröder, Dominik and Lange, Sven and Hangmann, Christian and Hedayat, Christian}},
  booktitle    = {{Tensorial Analysis of Networks (TAN) Modelling for PCB Signal Integrity and EMC Analysis}},
  isbn         = {{9781839530494}},
  keywords     = {{Huygens' box, NF-to-FF transformation, efficient FF radiation model, FF behaviour, EMI assessment, PCB, near-field measurements, efficient radiation model, far-field behaviour, RF design process, far-field prediction, Huygens'box principle, fullwave simulation, electronic system radiation, equivalent radiation source, electromagnetic simulation tool, near-field scan data, EM compatibility failure reduction}},
  pages        = {{315--346 (32)}},
  publisher    = {{The Institution of Engineering and Technology (IET)}},
  title        = {{{Far-field prediction combining simulations with near-field measurements for EMI assessment of PCBs}}},
  doi          = {{10.1049/pbcs072e_ch14}},
  year         = {{2020}},
}

@inproceedings{17055,
  abstract     = {{Understanding a new literature corpus can be a grueling experience for junior scholars. Nevertheless, corresponding guidelines have not been updated for decades. We contend that the traditional strategy of skimming all papers and reading selected papers afterwards needs to be revised. Therefore, we design a new strategy that guides the overall exploratory process by prioritizing influential papers for initial reading, followed by skimming the remaining papers. Consistent with schemata theory, starting with in-depth reading allows readers to acquire more substantial prior content schemata, which are representative for the literature corpus and useful in the following skimming process. To this end, we develop a prototype that identifies the influential papers from a set of PDFs, which is illustrated in a case study in the IT business value domain. With the new strategy, we envision a more efficient process of exploring unknown literature corpora.}},
  author       = {{Wagner, Gerit and Empl, Philipp and Schryen, Guido}},
  booktitle    = {{28th European Conference on Information Systems (ECIS 2020)}},
  keywords     = {{Reading and skimming, Exploring literature, Review methodology, Design science research, Schemata theory}},
  location     = {{Marrakesh, Morocco}},
  title        = {{{Designing a Novel Strategy for Exploring Literature Corpora}}},
  year         = {{2020}},
}

@inproceedings{16285,
  abstract     = {{To  decide  in  which  part  of  town to  open  stores,  high  street  retailers consult  statistical  data  on  customers  and  cities,  but  they  cannot  analyze  their customers’  shopping  behavior  and  geospatial  features  of  a  city  due  to  missing data.  While  previous  research  has  proposed  recommendation  systems  and decision  aids  that  address  this  type  of  decision  problem –  including  factory location  and  assortment  planning –  there  currently  is no design  knowledge available  to  prescribe  the  design  of  city  center  area  recommendation  systems (CCARS).   We   set   out   to   design   a   software   prototype   considering   local customers’  shopping  interests  and  geospatial  data  on  their  shopping  trips  for retail site selection.  With real data on 500 customers and 1,100 shopping trips, we demonstrate and evaluate our IT artifact. Our results illustrate how retailers and public town center managers can use CCARS for spatial location selection, growing retailers’ profits and a city center’s attractiveness for its citizens.}},
  author       = {{zur Heiden, Philipp and Berendes, Carsten Ingo and Beverungen, Daniel}},
  booktitle    = {{Proceedings of the 15th International Conference on Wirtschaftsinformatik}},
  keywords     = {{Town Center Management, High Street Retail, Recommender Systems, Geospatial Recommendations, Design Science Research}},
  location     = {{Potsdam}},
  title        = {{{Designing City Center Area Recommendation Systems}}},
  doi          = {{10.30844/wi_2020_e1-heiden}},
  year         = {{2020}},
}

@article{48366,
  abstract     = {{The proportion of freshmen enrolled in dual study programmes has steadily increased in recent years. From the perspective of potential students, these programmes are highly attractive because they combine types of learning that used to be largely separate at an institutional level: vocational and academic learning. In training-integrated dual study programmes, different institutional contexts, governance regimes, teaching styles and learning environments make bridging these two worlds of learning a challenge for both educators and learners. However, these programmes also allow leeway for didactic innovation, through the cooperation of different types of educational institutions and through new ways of using available didactic methods, and for establishing a new relationship between higher education (HE) and vocational education and training (VET). This paper positions training-integrated dual study programmes as an object of design-based research (DBR). By developing and using an extended model for the pedagogic development of HEIs, “pädagogische Hochschulentwicklung” (Brahm, Jenert, &amp; Euler, 2016a, p. 19; Euler, 2013, p. 360), the paper systematically identifies generic educational problems in these hybrids. Based on a literature review, this paper classifies and explains the design challenges at the level of the learning environment, the study programme and the organisation. The challenges revolve mainly around the cooperation and integration of HE and VET. The paper concludes with an outlook on future DBR projects designing dual studies.}},
  author       = {{Mordhorst, Lisa and Gössling, Bernd}},
  issn         = {{2511-0667}},
  journal      = {{EDeR. Educational Design Research}},
  keywords     = {{Dual study programmes, Design challenges, Pedagogic development of HEIs, Literature review, Study programme development, DBR cycle}},
  number       = {{1}},
  publisher    = {{Staats- und Universitatsbibliothek Hamburg Carl von Ossietzky}},
  title        = {{{Dual Study Programmes as a Design Challenge: Identifying Areas for Improvement as a Starting Point for Interventions}}},
  doi          = {{10.15460/eder.4.1.1482}},
  volume       = {{4}},
  year         = {{2020}},
}

@inproceedings{48850,
  abstract     = {{Sequential model-based optimization (SMBO) approaches are algorithms for solving problems that require computationally or otherwise expensive function evaluations. The key design principle of SMBO is a substitution of the true objective function by a surrogate, which is used to propose the point(s) to be evaluated next. SMBO algorithms are intrinsically modular, leaving the user with many important design choices. Significant research efforts go into understanding which settings perform best for which type of problems. Most works, however, focus on the choice of the model, the acquisition function, and the strategy used to optimize the latter. The choice of the initial sampling strategy, however, receives much less attention. Not surprisingly, quite diverging recommendations can be found in the literature. We analyze in this work how the size and the distribution of the initial sample influences the overall quality of the efficient global optimization (EGO) algorithm, a well-known SMBO approach. While, overall, small initial budgets using Halton sampling seem preferable, we also observe that the performance landscape is rather unstructured. We furthermore identify several situations in which EGO performs unfavorably against random sampling. Both observations indicate that an adaptive SMBO design could be beneficial, making SMBO an interesting test-bed for automated algorithm design.}},
  author       = {{Bossek, Jakob and Doerr, Carola and Kerschke, Pascal}},
  booktitle    = {{Proceedings of the Genetic and Evolutionary Computation Conference}},
  isbn         = {{978-1-4503-7128-5}},
  keywords     = {{continuous black-box optimization, design of experiments, initial design, sequential model-based optimization}},
  pages        = {{778--786}},
  publisher    = {{Association for Computing Machinery}},
  title        = {{{Initial Design Strategies and Their Effects on Sequential Model-Based Optimization: An Exploratory Case Study Based on BBOB}}},
  doi          = {{10.1145/3377930.3390155}},
  year         = {{2020}},
}

@article{19976,
  abstract     = {{The aim to reduce pollutant emission has led to a trend towards lightweight construction in car body development during the last years. As a consequence of the resulting need for multi-material design, mechanical joining technologies become increasingly important. Mechanical joining allows for the combination of dissimilar materials, while thermic joining techniques reach their limits. Self-piercing riveting enables the joining of dissimilar materials by using semi-tubular rivets as mechanical fasteners. The rivet production, however, is costly and time-consuming, as the rivets generally have to be hardened, tempered and coated after forming, in order to achieve an adequate strength and corrosion resistance. A promising approach to improve the efficiency of the rivet manufacturing is the use of high-strength high nitrogen steel as rivet material because these additional process steps would not be necessary anymore. As a result of the comparatively high nitrogen content, such steels have various beneficial properties like higher strength, good ductility and improved corrosion resistance. By cold bulk forming of high nitrogen steels high-strength parts can be manufactured due to the strengthening which is caused by the high strain hardening. However, high tool loads thereby have to be expected and are a major challenge during the production process. Consequently, there is a need for appropriate forming strategies. This paper presents key aspects concerning the process design for the manufacturing of semi-tubular self-piercing rivets made of high-strength steel. The aim is to produce the rivets in several forming stages without intermediate heat treatment between the single stages. Due to the high strain hardening of the material, a two stage forming concept will be investigated. Cup-backward extrusion is chosen as the first process step in order to form the rivet shank without forming the rivet foot. 
Thus, the strain hardening effects in the area of the rivet foot are minimized and the tool loads during the following process step can be reduced. During the second and final forming stage the detailed geometry of the rivet foot and the rivet head is formed. In this context, the effect of different variations, for example concerning the final geometry of the rivet foot, on the tool load is investigated using multistage numerical analysis. Furthermore, the influence of the process temperature on occurring stresses is analysed. Based on the results of the investigations, an adequate forming strategy and a tool concept for the manufacturing of semi-tubular self-piercing rivets made of high-strength steel are presented.}},
  author       = {{Kuball, Clara-Maria and Uhe, Benedikt and Meschut, Gerson and Merklein, Marion}},
  journal      = {{Procedia Manufacturing}},
  keywords     = {{high nitrogen steel, self-piercing riveting, joining by forming, bulk forming, tool design}},
  pages        = {{280--285}},
  title        = {{{Process design for the forming of semi-tubular self-piercing rivets made of high nitrogen steel}}},
  doi          = {{10.1016/j.promfg.2020.08.052}},
  volume       = {{50}},
  year         = {{2020}},
}

@article{19973,
  abstract     = {{As a result of lightweight design, increased use is being made of high-strength steel and aluminium in car bodies. Self-piercing riveting is an established technique for joining these materials. The dissimilar properties of the two materials have led to a number of different rivet geometries in the past. Each rivet geometry fulfils the requirements of the materials within a limited range. In the present investigation, an improved rivet geometry is developed, which permits the reliable joining of two material combinations that could only be joined by two different rivet geometries up until now. Material combination 1 consists of high-strength steel on both sides, while material combination 2 comprises aluminium on the punch side and high-strength steel on the die side. The material flow and the stress and strain conditions prevailing during the joining process are analysed by means of numerical simulation. The rivet geometry is then improved step-by-step on the basis of this analysis. Finally, the improved rivet geometry is manufactured and the findings of the investigation are verified in experimental joining tests.}},
  author       = {{Uhe, Benedikt and Kuball, Clara-Maria and Merklein, Marion and Meschut, Gerson}},
  journal      = {{Production Engineering}},
  keywords     = {{Self-piercing riveting, Joining technology, Rivet geometry, Multi-material design, High-strength steel, Aluminium}},
  pages        = {{417--423}},
  title        = {{{Improvement of a rivet geometry for the self-piercing riveting of high-strength steel and multi-material joints}}},
  doi          = {{10.1007/s11740-020-00973-w}},
  volume       = {{14}},
  year         = {{2020}},
}

@inproceedings{10577,
  abstract     = {{State-of-the-art frameworks for generating approximate circuits automatically explore the search space in an iterative process - often greedily. Synthesis and verification processes are invoked in each iteration to evaluate the found solutions and to guide the search algorithm. As a result, a large number of approximate circuits is subjected to analysis - leading to long runtimes - but only a few approximate circuits might form an acceptable solution.

In this paper, we present our Jump Search (JS) method which seeks to reduce the runtime of an approximation process by reducing the number of expensive synthesis and verification steps. To reduce the runtime, JS computes impact factors for each approximation candidate in the circuit to create a selection of approximate circuits without invoking synthesis or verification processes. We denote the selection as path from which JS determines the final solution. In our experimental results, JS achieved speed-ups of up to 57x while area savings remain comparable to the reference search method, Simulated Annealing.}},
  author       = {{Witschen, Linus Matthias and Ghasemzadeh Mohammadi, Hassan and Artmann, Matthias and Platzner, Marco}},
  booktitle    = {{Proceedings of the 2019 on Great Lakes Symposium on VLSI  - GLSVLSI '19}},
  isbn         = {{9781450362528}},
  keywords     = {{Approximate computing, design automation, parameter selection, circuit synthesis}},
  location     = {{Tysons Corner, VA, USA}},
  publisher    = {{ACM}},
  title        = {{{Jump Search: A Fast Technique for the Synthesis of Approximate Circuits}}},
  doi          = {{10.1145/3299874.3317998}},
  year         = {{2019}},
}

@inproceedings{12918,
  abstract     = {{The test for small delay faults is of major importance for predicting potential early life failures or wearout problems. Typically, a faster-than-at-speed test (FAST) with several different frequencies is used to detect also hidden small delays, which can only be propagated over short paths. But then the outputs at the end of long paths may no longer reach their stable values at the nominal observation time and must be considered as unknown (X-values). Thus, test response compaction for FAST must be extremely flexible to cope with high X-rates, which also vary with the test frequencies. Stochastic compaction introduced by Mitra et al. is controlled by weighted pseudo-random signals allowing for easy adaptation to varying conditions. As demonstrated in previous work, the pseudo-random control can be optimized for high fault efficiency or X-reduction, but a given target in fault efficiency cannot be guaranteed. To close this gap, a hybrid space compactor is introduced in this paper. It is based on the observation that many faults are lost in the compaction of relatively few critical test patterns. For these critical patterns a deterministic compaction phase is added to the test, where the existing compactor structure is re-used, but controlled by specifically determined control vectors.}},
  author       = {{Maaz, Mohammad Urf and Sprenger, Alexander and Hellebrand, Sybille}},
  booktitle    = {{50th IEEE International Test Conference (ITC)}},
  keywords     = {{Faster-than-at-speed test, BIST, DFT, Test response compaction, Stochastic compactor, X-handling}},
  location     = {{Washington, DC, USA}},
  pages        = {{1--8}},
  publisher    = {{IEEE}},
  title        = {{{A Hybrid Space Compactor for Adaptive X-Handling}}},
  year         = {{2019}},
}

@inproceedings{48409,
  author       = {{Wessel, Lena}},
  booktitle    = {{Eleventh Congress of the European Society for Research in Mathematics Education (CERME11)}},
  editor       = {{Jankvist, Uffe Thomas and van den Heuvel-Panhuizen, Marja and Veldhuis, Michiel}},
  keywords     = {{Vocational education, language, percentages, scaffolding, design research}},
  number       = {{12}},
  publisher    = {{Freudenthal Group}},
  title        = {{{How theories of language-responsive mathematics can inform teaching designs for vocational mathematics}}},
  volume       = {{TWG07}},
  year         = {{2019}},
}

@inproceedings{9992,
  abstract     = {{State-of-the-art industrial compact high power electronic packages require copper-copper interconnections with larger cross sections made by ultrasonic bonding. In comparison to aluminium-copper, copper-copper interconnections require increased normal forces and ultrasonic power, which might lead to substrate damage due to increased mechanical stresses. One option to raise friction energy without increasing vibration amplitude between wire and substrate or bonding force is the use of two-dimensional vibration. The first part of this contribution reports on the development of a novel bonding system that executes two-dimensional vibrations of a tool-tip to bond a nail- like pin onto a copper substrate. Since intermetallic bonds only form properly when surfaces are clean, oxide free and activated, the geometries of tool-tip and pin were optimised using finite element analysis. To maximize the area of the bonded annulus the distribution of normal pressure was optimized by varying the convexity of the bottom side of the pin. Second, a statistical model obtained from an experimental parameter study shows the influence of different bonding parameters on the bond result. To find bonding parameters with the minimum number of tests, the experiments have been planned using a D-optimal experimental design approach.}},
  author       = {{Dymel, Collin and Eichwald, Paul and Schemmel, Reinhard and Hemsel, Tobias and Brökelmann, Michael and Hunstig, Matthias and Sextro, Walter}},
  booktitle    = {{Proceedings of 7th Electronics System-Integration Technology Conference}},
  location     = {{Dresden, Germany}},
  keywords     = {{ultrasonic wire-bonding, bond-tool design, parameter identification, statistical engineering}},
  pages        = {{1--6}},
  title        = {{{Numerical and statistical investigation of weld formation in a novel two-dimensional copper-copper bonding process}}},
  year         = {{2018}},
}

@inproceedings{24395,
  abstract     = {{In the field of lightweight design by composites, the V-Model forms the basis of inter- and
transdisciplinary collaboration and research of 13 doctoral students from different disciplines, i. e.
engineering, sciences and social sciences. The technological challenges of the research college itself
and the V-Model as an approach for addressing these challenges are introduced. Within the
cooperation of the young researchers, a technology demonstrator was produced. On the one hand this
can be seen as demonstrator for the different technologies which are addressed by individual research
and on the other hand for the interdisciplinary collaboration itself. Exemplary, this technology
demonstrator is presented as one result of the research group and the challenges of the
interdisciplinary collaboration while producing it are pointed out.}},
  author       = {{Weiß-Borkowski, Nathalie and Horwath, Ilona and Berscheid, Anna-Lena and Dohmeier-Fischer, Silvia and Tröster, Thomas}},
  keywords     = {{Lightweight Design, Composites, Interdisciplinarity, Transdisciplinarity, V-Model}},
  location     = {{Valencia, Spain}},
  title        = {{{NEW APPROACHES IN LIGHTWEIGHT DESIGN: V-MODEL OF LIGHTWEIGHT DESIGN BY COMPOSITES AS AN APPROACH OF INTER- AND TRANSDISCIPLINARY RESEARCH}}},
  year         = {{2018}},
}

@inproceedings{24468,
  abstract     = {{Inter- and transdisciplinary research are new demands in Higher Education. Aiming to enhance the social relevance, usability and sustainability of technological products and solutions, society and public institutions such as research funding organizations increasingly expect engineers to include inter- and transdisciplinary approaches into the development of new technologies. Engineering research and education, however, are particularly challenging areas to realize inter- and transdisciplinary collaborations, for manifold reasons.
This contribution presents methods and results of an inter- and transdisciplinary research and education strategy designed to meet the particular requirements of engineers and engineering students. It starts with a brief discussion of typical challenges regarding inter- and transdisciplinary approaches in engineering (research topics, research culture, skills, time, and barriers of lay people to involve in technology development). Secondly, it presents the methods developed to overcome those challenges within the context of the NRW Fortschrittskolleg "Light - Efficient - Mobile" (FK LEM). Founded in 2014, the FK LEM is a PhD programme that focuses on lightweight construction, but with a special emphasis on how lightweight technologies are connected to different areas of society, to societal actors and technology users, and to the needs of a diversity of social groups. In order to explore these connections, we organized three workshops to bring public service, civil society, industry, practitioners and engineers together to discuss the perceived needs in those areas, and the potential of lightweight solutions. Topically, the workshops were dedicated to the fields of Rescue \& Security Services; Care, Mobility \& Assisted Living; and Sustainable Resources \& Climate Protection. Methodologically, we applied a pragmatic but valid approach to focus groups and discourse analysis. Results of the workshops in terms of directions for future research, epistemological and ethical dimensions of lightweight engineering are presented in the third part of our contribution. Finally, we discuss how our method and experience can be transferred into other engineering and educational contexts.
In other words, how empowering students, engineers and the public to involve in inter- and transdisciplinary engineering processes can be achieved, and how this empowerment supports the development of innovative technologies as well as engineers’ skills to design technology in line with societies’ needs and challenges.}},
  author       = {{Horwath, Ilona and Dohmeier-Fischer, Silvia and Weiß-Borkowski, Nathalie and Tröster, Thomas}},
  booktitle    = {{INTED2018 Proceedings}},
  keywords     = {{Lightweight Design, Interdisciplinarity, Transdisciplinarity, Higher Education, Research Methods}},
  title        = {{{FROM EMPOWERMENT TO INNOVATION: INTER- AND TRANSDISCIPLINARY RESEARCH METHODS IN LIGHTWEIGHT ENGINEERING}}},
  doi          = {{10.21125/inted.2018.1651}},
  year         = {{2018}},
}

@article{4516,
  abstract     = {{Although many methods have been proposed for engineering service systems and customer solutions, most of these approaches give little consideration to recombinant service innovation. Recombinant innovation refers to reusing and integrating resources that were previously unconnected. In an age of networked products and data, we can expect that many service innovations will be based on adding, dissociating, and associating existing value propositions by accessing internal and external resources instead of designing them from scratch. The purpose of this paper is to identify if current service engineering approaches account for the mechanisms of recombinant innovation and to design a method for recombinant service systems engineering. In a conceptual analysis of 24 service engineering methods, the study identified that most methods (1) focus on designing value propositions instead of service systems, (2) view service independent of physical goods, (3) are either linear or iterative instead of agile, and (4) do not sufficiently address the mechanisms of recombinant innovation. The paper discusses how these deficiencies can be remedied and designs a revised service systems engineering approach that reorganizes service engineering processes according to four design principles. The method is demonstrated with the recombinant design of a service system for predictive maintenance of agricultural machines.}},
  author       = {{Beverungen, Daniel and Lüttenberg, Hedda and Wolf, Verena}},
  issn         = {{2363-7005}},
  journal      = {{Business \& Information Systems Engineering}},
  keywords     = {{Service engineering, Recombinant innovation, (Product-)service system, Design science research, New service development}},
  number       = {{5}},
  pages        = {{377--391}},
  publisher    = {{SpringerNature}},
  title        = {{{Recombinant Service Systems Engineering}}},
  doi          = {{10.1007/s12599-018-0526-4}},
  volume       = {{60}},
  year         = {{2018}},
}

@inproceedings{9974,
  abstract     = {{The integrated modeling of behavior and reliability in system development delivers a model-based approach for reliability investigation by taking into account the dynamic system behavior as well as the system architecture at different phases of the development process. This approach features an automated synthesis of a reliability model out of a behavior model enabling for the closed loop modeling of degradation of the system and its (dynamic) behavior. The approach is integrated into the development process following Systems Engineering. It is based on standard models used in model-based development methodologies i.e. SysML or Matlab/Simulink. In addition to the theoretical description of the necessary steps the procedure is validated by an application example at two stages of the development process.}},
  author       = {{Hentze, Julian and Kaul, Thorben and Grässler, Iris and Sextro, Walter}},
  booktitle    = {{ICED17, 21st International conference on engineering design}},
  keywords     = {{Design for X (DfX), Product modelling / models, Robust design, Systems Engineering (SE), Reliability}},
  pages        = {{385--394}},
  title        = {{{Integrated modeling of behavior and reliability in system development}}},
  year         = {{2017}},
}

