@article{36481,
  abstract     = {{Recent studies highlight early childhood teachers’ mathematics-related competence. Developing this competence should be a main aspect of early childhood teachers’ education. This is, however, not the case in all countries. Consequently, high-quality professional development courses are needed. Based on research results, we developed a competence-oriented continuous professional development course ("EmMa") and examined the effects of "EmMa" by asking: How does "EmMa" affect the development of early childhood teachers’ i) mathematical content knowledge, ii) mathematical pedagogical content knowledge and iii) beliefs towards mathematics in general? To answer these questions, we conducted a pre-test/post-test study including a control group with 99 in-service early childhood teachers. Results show that the course affected teachers’ mathematical pedagogical content knowledge and static orientation towards mathematics positively. From this we conclude that scaling-up "EmMa" might be a suitable approach to bridge the gap between pre-service education with nearly no mathematics and the challenges of early mathematics education.}},
  author       = {{Bruns, Julia and Eichen, Lars and Gasteiger, Hedwig}},
  journal      = {{Mathematics Teacher Education and Development (MTED)}},
  keywords     = {{Beliefs, Competency Based Teacher Education, Control Groups, Early Childhood Education, Faculty Development, Foreign Countries, Inservice Teacher Education, Intervention, Mathematical Aptitude, Mathematics Skills, Pedagogical Content Knowledge, Preschool Teachers, Pretests Posttests, Professional Continuing Education, Statistical Analysis, Teacher Competency Testing}},
  number       = {{3}},
  pages        = {{76--93}},
  title        = {{{Mathematics-related Competence of Early Childhood Teachers Visiting a Continuous Professional Development Course: An Intervention Study}}},
  volume       = {{19}},
  year         = {{2017}},
}

@inproceedings{191,
  abstract     = {{One purpose of requirement refinement is that higher-level requirements have to be translated to something usable by developers. Since customer requirements are often written in natural language by end users, they lack precision, completeness and consistency. Although user stories are often used in the requirement elicitation process in order to describe the possibilities how to interact with the software, there is always something unspoken. Here, we present techniques how to automatically refine vague software descriptions. Thus, we can bridge the gap by first revising natural language utterances from higher-level to more detailed customer requirements, before functionality matters. We therefore focus on the resolution of semantically incomplete user-generated sentences (i.e. non-instantiated arguments of predicates) and provide ontology-based gap-filling suggestions how to complete unverbalized information in the user’s demand.}},
  author       = {{Geierhos, Michaela and Bäumer, Frederik Simon}},
  booktitle    = {{Proceedings of the 21st International Conference on Applications of Natural Language to Information Systems (NLDB)}},
  editor       = {{Métais, Elisabeth and Meziane, Farid and Saraee, Mohamad and Sugumaran, Vijayan and Vadera, Sunil}},
  isbn         = {{978-3-319-41753-0}},
  keywords     = {{Requirement refinement, Concept expansion, Ontology-based instantiation of predicate-argument structure}},
  location     = {{Salford, UK}},
  pages        = {{37--47}},
  publisher    = {{Springer}},
  title        = {{{How to Complete Customer Requirements: Using Concept Expansion for Requirement Refinement}}},
  doi          = {{10.1007/978-3-319-41754-7_4}},
  volume       = {{9612}},
  year         = {{2016}},
}

@article{4421,
  abstract     = {{In Switzerland, every student graduating from grammar school can begin to study at a university. This leads to high dropout rates. Although students’ motivation is considered a strong predictor of performance, the development of motivation during students’ transition from high school to university has rarely been investigated. Additionally, little is known about the relation of motivational aspects with other influences on study performance. The present longitudinal study addresses this research gap and examines the development of economics and management students’ study motivation. It encompasses four waves of data collected throughout the first year, using quantitative online surveys. In total, the sample consists of 820 students. Data is analysed using latent change modelling. Results indicate that students start at a higher level of intrinsic motivation compared to extrinsic motivation. The variability of the starting value of the two constructs is also differing. The analysis also shows a gradual decline in students’ motivation. Above all, the transition from secondary to higher education seems to be a driver for this decline.}},
  author       = {{Brahm, Taiga and Jenert, Tobias and Wagner, Dietrich}},
  issn         = {{0018-1560}},
  journal      = {{Higher Education}},
  keywords     = {{Transition to higher education, Motivation, Longitudinal study, Socio-cultural factors, Latent change model, Switzerland}},
  number       = {{3}},
  pages        = {{459--478}},
  publisher    = {{Springer Nature}},
  title        = {{{The crucial first year: a longitudinal study of students’ motivational development at a Swiss Business School}}},
  doi          = {{10.1007/s10734-016-0095-8}},
  volume       = {{73}},
  year         = {{2016}},
}

@article{4951,
  abstract     = {{Despite the rapid growth and potential of technology-based services, managers' greatest challenges are gaining customer acceptance and increasing usage of these new innovative services. In the B2C field, studies of self-service technology show that perceived risk is an important factor influencing the use of service technology. Though prior research explores different risk types that emerge in consumer settings, risk perception in the B2B setting lacks a detailed examination of different risk types influencing technology-based service adoption. Data from 49 qualitative interviews with providers and customers in two different B2B industries inform this study. The findings emphasize the importance of functional and financial risks in a B2B context and show that business customers' personal and psychological fears hinder their use of technology-based services. Results highlight differences in risk perception and evaluation between customers and providers.}},
  author       = {{Paluch, Stefanie and Wünderlich, Nancy}},
  journal      = {{Journal of Business Research}},
  keywords     = {{Risk perception, Technology-based service innovations, Business-to-business context, Interview study, Risk categories, Smart service}},
  number       = {{7}},
  pages        = {{2424--2431}},
  publisher    = {{Elsevier}},
  title        = {{{Contrasting Risk Perceptions of Technology-Based Service Innovations in Inter-Organizational Settings}}},
  volume       = {{69}},
  year         = {{2016}},
}

@inproceedings{9963,
  abstract     = {{Tire-wheel assembly is the only connection between road and vehicle. Contacting directly with road within postcard size of contact area, it is mounted and guided by the suspension system. Therefore kinematics and compliances of suspension system greatly influence the frictional coupling of tire tread elements and road surface asperities by affecting pressure and sliding velocity distribution in the contact zone. This study emphasizes the development of a numerical methodology for frictional rolling contact analysis with focus on interaction of suspension system dynamics and tire-road contact using ADAMS. For this purpose a comprehensive flexible multibody system of the multi-link rear suspension is established, where both flexible and rigid bodies are modeled to allow large displacements with included elastic effects. To meet accuracy requirements for the high frequency applications, such as road excitations, the amplitude- and frequency-dependency of rubber-metal bushings is included. Furthermore the proposed flexible viscoelastic suspension model is enhanced by a Flexible Ring Tire Model (FTire), which describes a 3D tire dynamic response and covers any road excitations by tread submodel connected to road surface model. Concerning the verification and validation procedure numerous experiments are carried out to confirm the validity and the accuracy of both the developed submodels and the entire model. The devised approach makes it possible to investigate the influence of suspension system design on dynamical rolling contact and to evaluate tire tread wear. Therefore it can be a useful tool to predict frictional power distribution within the contact area under more realistic conditions.}},
  author       = {{Kohl, Sergej and Sextro, Walter and Schulze, Sebastian}},
  booktitle    = {{The 2nd International Conference on Automotive Innovation and Green Energy Vehicle (AiGEV 2016), Cyberjaya, Malaysia, 2016.}},
  keywords     = {{Kinematics and compliances, flexible viscoelastic suspension model, frictional rolling contact analysis, frictional power distribution.}},
  pages        = {{1--12}},
  title        = {{{Aspects of Flexible Viscoelastic Suspension Modeling for Frictional Rolling Contact Analysis using ADAMS}}},
  year         = {{2016}},
}

@inproceedings{9967,
  abstract     = {{Multibody models of mechatronic systems are usually interdisciplinary and are continuously gaining complexity, due to a growing demand for comprehensive models of systems including effects of electro mechanics, elastic bodies, contacts and friction. To be capable of simulating large models with subassemblies and contact between bodies, reduction techniques are required, which need certain experience in the choice of parameters. This publication discusses different possibilities for the modal description of structures in flexible multibody models with application to an Adaptive Frontlighting System in ADAMS. It will be shown that mode count, assembling of structures before and after modal reduction and influence of damping parameters of particular structures and subassemblies affect the behavior of the entire system. A common reduction technique for flexible structures in multibody models is the component mode synthesis, which uses a certain number of modes for description of the modal behavior of a structure. The influence of the mode count will be shown by means of different modal descriptions of one structure that contributes to a comprehensive model. Another study will prove that modal data of subassemblies and assemblies of modal reduced single structures lead to different models. The definition of damping parameters depends on the number of structures that have been added to an assembly before modal reduction and on the number of modal reduced structures. The comparison of subassemblies and the entire model to experimental data will highlight the accuracy, computational overhead, complexity of models and modeling efficiency of the comprehensive model for the frontlighting system.}},
  author       = {{Schulze, Sebastian and Sextro, Walter and Kohl, Sergej}},
  booktitle    = {{2nd International Conference on Automotive Innovation and Green Energy Vehicle (AiGEV) Malaysia 2016}},
  keywords     = {{model reduction, modal description, flexible multibody systems}},
  pages        = {{1--11}},
  title        = {{{Using Adequate Reduced Models for Flexible Multibody Systems of Automotive Mechatronic Systems}}},
  year         = {{2016}},
}

@inproceedings{5588,
  abstract     = {{The protection of information technology (IT) has become and is predicted to remain a key economic challenge for organizations. While research on IT security investment is fast growing, it lacks a theoretical basis for structuring research, explaining economic-technological phenomena and guiding future research. We address this shortcoming by suggesting a new theoretical model emerging from a multi-theoretical perspective adopting the Resource-Based View and the Organizational Learning Theory. The joint application of these theories allows to conceptualize in one theoretical model the organizational learning effects that occur when the protection of organizational resources through IT security countermeasures develops over time. We use this model of IT security investments to synthesize findings of a large body of literature and to derive research gaps. We also discuss managerial implications of (closing) these gaps by providing practical examples.}},
  author       = {{Weishäupl, Eva and Yasasin, Emrah and Schryen, Guido}},
  booktitle    = {{International Conference on Information Systems}},
  keywords     = {{Information Security, Investment, Literature review, Resource-based View, Organizational Learning Theory, Multi-theoretical Perspective}},
  title        = {{{A Multi-Theoretical Literature Review on Information Security Investments using the Resource-Based View and the Organizational Learning Theory}}},
  year         = {{2015}},
}

@inproceedings{48838,
  abstract     = {{The majority of algorithms can be controlled or adjusted by parameters. Their values can substantially affect the algorithms’ performance. Since the manual exploration of the parameter space is tedious – even for few parameters – several automatic procedures for parameter tuning have been proposed. Recent approaches also take into account some characteristic properties of the problem instances, frequently termed instance features. Our contribution is the proposal of a novel concept for feature-based algorithm parameter tuning, which applies an approximating surrogate model for learning the continuous feature-parameter mapping. To accomplish this, we learn a joint model of the algorithm performance based on both the algorithm parameters and the instance features. The required data is gathered using a recently proposed acquisition function for model refinement in surrogate-based optimization: the profile expected improvement. This function provides an avenue for maximizing the information required for the feature-parameter mapping, i.e., the mapping from instance features to the corresponding optimal algorithm parameters. The approach is validated by applying the tuner to exemplary evolutionary algorithms and problems, for which theoretically grounded or heuristically determined feature-parameter mappings are available.}},
  author       = {{Bossek, Jakob and Bischl, Bernd and Wagner, Tobias and Rudolph, Günter}},
  booktitle    = {{Proceedings of the Genetic and Evolutionary Computation Conference}},
  isbn         = {{978-1-4503-3472-3}},
  keywords     = {{evolutionary algorithms, model-based optimization, parameter tuning}},
  pages        = {{1319--1326}},
  publisher    = {{Association for Computing Machinery}},
  title        = {{{Learning Feature-Parameter Mappings for Parameter Tuning via the Profile Expected Improvement}}},
  doi          = {{10.1145/2739480.2754673}},
  year         = {{2015}},
}

@inproceedings{1134,
  abstract     = {{This paper focuses on the first step in combining prescriptive analytics with scenario techniques in order to provide strategic development after the use of InSciTe, a data prescriptive analytics application. InSciTe supports the improvement of researchers’ individual performance by recommending new research directions. Standardized influential factors are presented as a foundation for automated scenario modelling such as the prototypical report generation function of InSciTe. Additionally, a use-case is shown which validates the potential of the standardized influential factors for raw scenario development.}},
  author       = {{Weber, Jens and Minhee, Cho and Lee, Mikyoung and Song, Sa-kwang and Geierhos, Michaela and Jung, Hanmin}},
  booktitle    = {{Proceedings of the First International Workshop on Patent Mining and Its Applications (IPaMin 2014) co-located with Konvens 2014}},
  editor       = {{Jung, Hanmin and Mandl, Thomas and Womser-Hacker, Christa and Xu, Shuo}},
  issn         = {{1613-0073}},
  keywords     = {{Standardized Influential Factors, Prescriptive Analytics, Role Model Group, Scenario Technique}},
  location     = {{Hildesheim, Germany}},
  publisher    = {{CEUR-WS.org}},
  title        = {{{System Thinking: Crafting Scenarios for Prescriptive Analytics}}},
  volume       = {{1292}},
  year         = {{2014}},
}

@inproceedings{4474,
  abstract     = {{The transition from school to university can be overwhelming for some students. While students' motivation is considered a strong predictor of performance, the development of motivation during students' transition from high school to university has rarely been investigated. Additionally, little is known about the relation of motivational aspects with other influences on study performance. The present mixed methods study addresses this research gap and examines the development of economics students' study motivation. The longitudinal mixed-method study included three waves of data collected using quantitative surveys and a series of five interviews conducted with 14 first-year students over eight months. Regarding students' development over time, the quantitative analysis showed a gradual decline in students' motivation. However, in the interviews, certain events during the first year were identified as critical incidents demotivating students. These insights into the development of students' motivation at a business school show that concrete incidents influence students' motivational development; thus, they could be transformed by structural changes.}},
  author       = {{Brahm, Taiga and Jenert, Tobias}},
  keywords     = {{motivation, business school, higher education, latent growth curve model, longitudinal study, mixed methods, interview, students}},
  location     = {{Leuven}},
  title        = {{{The crucial first year: The development of students' motivation at a Business School-a Mixed Methods Study}}},
  year         = {{2014}},
}

@inproceedings{4476,
  abstract     = {{Auseinandersetzungen über Forschungsmethodologien haben in den Bildungswissenschaften lange Tradition. Sie werden häufig unversöhnlich und mit Profilierung der Gegensätze geführt. Für uns hängt die Sinnhaftigkeit einer Forschungskonzeption von der Problemstellung und dem Erkenntnisinteresse ab. Dies gilt auch für Design-Based Research (DBR). Dieser Ansatz entstand als Reaktion auf die Kritik an mangelnder praktischer Anwendung von Befunden aus der empirisch-analytischen Lehr-Lernforschung. Als Ergebnis werden Theorien angestrebt, die für die Praxis einen Nutzen bieten und zugleich über die Anwendung in einer singulären Situation hinausgehen. Wir wollen folgenden Fragen nachgehen: Wo liegen die Gemeinsamkeiten, wo die Spezifika von DBR gegenüber bestehenden Methodologien? Wie wird der Generalisierungsanspruch von Theorien innerhalb von DBR verstanden und umgesetzt? Für welche Erkenntnisinteressen eignet sich DBR? Welche Methoden sind für DBR sinnvoll und können wie angewendet werden?}},
  author       = {{Brahm, Taiga and Jenert, Tobias}},
  keywords     = {{design-based research, gestaltungsbasierte Forschung, Erziehungswissenschaft, Design, Mixed Methods}},
  location     = {{Berlin}},
  publisher    = {{Humboldt-Universität}},
  title        = {{{Wie kann über DBR die wissenschaftliche und praktische Relevanz der Forschung gesichert werden?}}},
  year         = {{2014}},
}

@inproceedings{9868,
  abstract     = {{In order to increase mechanical strength, heat dissipation and ampacity and to decrease failure through fatigue fracture, wedge copper wire bonding is being introduced as a standard interconnection method for mass production. To achieve the same process stability when using copper wire instead of aluminum wire a profound understanding of the bonding process is needed. Due to the higher hardness of copper compared to aluminum wire it is more difficult to approach the surfaces of wire and substrate to a level where van der Waals forces are able to arise between atoms. Also, enough friction energy referred to the total contact area has to be generated to activate the surfaces. Therefore, a friction model is used to simulate the joining process. This model calculates the resulting energy of partial areas in the contact surface and provides information about the adhesion process of each area. The focus here is on the arising of micro joints in the contact area depending on the location in the contact and time. To validate the model, different touchdown forces are used to vary the initial contact areas of wire and substrate. Additionally, a piezoelectric tri-axial force sensor is built up to identify the known phases of pre-deforming, cleaning, adhering and diffusing for the real bonding process to map with the model. Test substrates as DBC and copper plate are used to show the different formations of a wedge bond connection due to hardness and reaction propensity. The experiments were done by using 500 $\mu$m copper wire and a standard V-groove tool.}},
  author       = {{Althoff, Simon and Neuhaus, Jan and Hemsel, Tobias and Sextro, Walter}},
  booktitle    = {{Electronic Components and Technology Conference (ECTC), 2014 IEEE 64th}},
  keywords     = {{adhesion, circuit reliability, deformation, diffusion, fatigue cracks, friction, interconnections, lead bonding, van der Waals forces, Cu, adhering process, adhesion process, ampacity improvement, bond quality improvement, cleaning process, diffusing process, fatigue fracture failure, friction energy, friction model, heat dissipation, mechanical strength, piezoelectric triaxial force sensor, predeforming process, size 500 mum, total contact area, van der Waals forces, wedge copper wire bonding, Bonding, Copper, Finite element analysis, Force, Friction, Substrates, Wires}},
  pages        = {{1549--1555}},
  title        = {{{Improving the bond quality of copper wire bonds using a friction model approach}}},
  doi          = {{10.1109/ECTC.2014.6897500}},
  year         = {{2014}},
}

@inproceedings{9879,
  abstract     = {{Application of prognostics and health management (PHM) in the field of Proton Exchange Membrane (PEM) fuel cells is emerging as an important tool in increasing the reliability and availability of these systems. Though a lot of work is currently being conducted to develop PHM systems for fuel cells, various challenges have been encountered including the self-healing effect after characterization as well as accelerated degradation due to dynamic loading, all which make RUL predictions a difficult task. In this study, a prognostic approach based on adaptive particle filter algorithm is proposed. The novelty of the proposed method lies in the introduction of a self-healing factor after each characterization and the adaption of the degradation model parameters to fit to the changing degradation trend. An ensemble of five different state models based on weighted mean is then developed. The results show that the method is effective in estimating the remaining useful life of PEM fuel cells, with majority of the predictions falling within 5\% error. The method was employed in the IEEE 2014 PHM Data Challenge and led to our team emerging the winner of the RUL category of the challenge.}},
  author       = {{Kimotho, James Kuria  and Meyer, Tobias and Sextro, Walter}},
  booktitle    = {{Prognostics and Health Management (PHM), 2014 IEEE Conference on}},
  keywords     = {{ageing, particle filtering (numerical methods), proton exchange membrane fuel cells, remaining life assessment, PEM fuel cell prognostics, PHM, RUL predictions, accelerated degradation, adaptive particle filter algorithm, dynamic loading, model parameter adaptation, prognostics and health management, proton exchange membrane fuel cells, remaining useful life estimation, self-healing effect, Adaptation models, Data models, Degradation, Estimation, Fuel cells, Mathematical model, Prognostics and health management}},
  pages        = {{1--6}},
  title        = {{{PEM fuel cell prognostics using particle filter with model parameter adaptation}}},
  doi          = {{10.1109/ICPHM.2014.7036406}},
  year         = {{2014}},
}

@inproceedings{9880,
  abstract     = {{With the paradigm shift towards prognostic and health management (PHM) of machinery, there is need for reliable PHM methodologies with narrow error bounds to allow maintenance engineers take decisive maintenance actions based on the prognostic results. Prognostics is mainly concerned with the estimation of the remaining useful life (RUL) or time to failure (TTF). The accuracy of PHM methods is usually a function of the features extracted from the raw data obtained from sensors. In cases where the extracted features do not display clear degradation trends, for instance highly loaded bearings, the accuracy of the state of the art PHM methods is significantly affected. The data which lacks clear degradation trend is referred to as non-trending data. This study presents a method for extracting degradation trends from non-trending condition monitoring data for RUL estimation. The raw signals are first filtered using a discrete wavelet transform (DWT) denoising filter to remove noise from the acquired signals. Time domain, frequency domain and time-frequency domain features are then extracted from the filtered signals. An autoregressive model is then applied to the extracted features to identify the degradation trends. Features representing the maximum health information are then selected based on a performance evaluation criteria using extreme learning machine (ELM) algorithm. The selected features can then be used as inputs in a prognostic algorithm. The feasibility of the method is demonstrated using experimental bearing vibration data. The performance of the method is evaluated on the accuracy of RUL estimation and the results show that the method can be used to accurately estimate RUL with a maximum error of 10\%.}},
  author       = {{Kimotho, James Kuria and Sextro, Walter}},
  booktitle    = {{Proceedings of the Second European Conference of the Prognostics and Health Management Society 2014}},
  keywords     = {{autoregressive model, ELM, feature extraction, feature selection, non-trending, remaining useful life}},
  title        = {{{An approach for feature extraction and selection from non-trending data for machinery prognosis}}},
  volume       = {{5}},
  year         = {{2014}},
}

@inproceedings{9895,
  abstract     = {{Power semiconductor modules are used to control and switch high electrical currents and voltages. Within the power module package wire bonding is used as an interconnection technology. In recent years, aluminum wire has been used preferably, but an ever-growing market of powerful and efficient power modules requires a material with better mechanical and electrical properties. For this reason, a technology change from aluminum to copper is indispensable. However, the copper wire bonding process reacts more sensitive to parameter changes. This makes manufacturing reliable copper bond connections a challenging task. The aim of the BMBF funded project Itsowl-InCuB is the development of self-optimizing techniques to enable the reliable production of copper bond connections under varying conditions. A model of the process is essential to achieve this aim. This model needs to include the dynamic elasto-plastic deformation, the ultrasonic softening effect and the proceeding adhesion between wire and substrate. This paper focusses on the pre-deformation process. In the touchdown phase, the wire is pressed into the V-groove of the tool and a small initial contact area between wire and substrate arise. The local characteristics of the material change abruptly because of the cold forming. Consequently, the pre-deformation has a strong effect on the joining process. In [1], a pre-cleaning effect during the touchdown process of aluminum wires by cracking of oxide layers was presented. These interactions of the process parameters are still largely unknown for copper. In a first step, this paper validates the importance of modeling the pre-deformation by showing its impact on the wire deformation characteristic experimentally. Creating cross-section views of pre-deformed copper wires has shown a low deformation degree compared to aluminum. 
By using a digital microscope and a scanning confocal microscope an analysis about the contact areas and penetration depths after touchdown has been made. Additionally, it has to be taken into account that the dynamical touchdown force depends on the touchdown speed and the touchdown force set in the bonding machine. In order to measure the overshoot in the force signals, a strain gauge sensor has been used. Subsequently, the affecting factors have been interpreted independently. Furthermore, the material properties of copper wire have been investigated with tensile tests and hardness measurements. In a second step, the paper presents finite element models of the touchdown process for source and destination bonds. These models take the measured overshoot in the touchdown forces into account. A multi-linear, isotropic material model has been selected to map the material properties of the copper. A validation of the model with the experimentally determined contact areas, normal pressures and penetration depths reveals the high model quality. Thus, the simulation is able to calculate and visualize the three dimensional pre-deformation with an integrated material parameter of the wire if the touchdown parameters of the bonding machine are known. Based on the calculated deformation degrees of wire and substrate, it is probably possible to investigate the effect of the pre-deformation on the pre-cleaning phase in the copper wire bonding.}},
  author       = {{Unger, Andreas and Sextro, Walter and Althoff, Simon and Eichwald, Paul and Meyer, Tobias and Eacock, Florian and Brökelmann, Michael}},
  booktitle    = {{Proceedings of the 47th International Symposium on Microelectronics (IMAPS)}},
  keywords     = {{pre-deformation, copper wire bonding, finite element model}},
  pages        = {{289--294}},
  title        = {{{Experimental and Numerical Simulation Study of Pre-Deformed Heavy Copper Wire Wedge Bonds}}},
  year         = {{2014}},
}

@inproceedings{11753,
  abstract     = {{This contribution describes a step-wise source counting algorithm to determine the number of speakers in an offline scenario. Each speaker is identified by a variational expectation maximization (VEM) algorithm for complex Watson mixture models and therefore directly yields beamforming vectors for a subsequent speech separation process. An observation selection criterion is proposed which improves the robustness of the source counting in noise. The algorithm is compared to an alternative VEM approach with Gaussian mixture models based on directions of arrival and shown to deliver improved source counting accuracy. The article concludes by extending the offline algorithm towards a low-latency online estimation of the number of active sources from the streaming input data.}},
  author       = {{Drude, Lukas and Chinaev, Aleksej and Tran Vu, Dang Hai and Haeb-Umbach, Reinhold}},
  booktitle    = {{14th International Workshop on Acoustic Signal Enhancement (IWAENC 2014)}},
  keywords     = {{Accuracy, Acoustics, Estimation, Mathematical model, Source separation, Speech, Vectors, Bayes methods, Blind source separation, Directional statistics, Number of speakers, Speaker diarization}},
  pages        = {{213--217}},
  title        = {{{Towards Online Source Counting in Speech Mixtures Applying a Variational EM for Complex Watson Mixture Models}}},
  year         = {{2014}},
}

@article{11861,
  abstract     = {{In this contribution we present a theoretical and experimental investigation into the effects of reverberation and noise on features in the logarithmic mel power spectral domain, an intermediate stage in the computation of the mel frequency cepstral coefficients, prevalent in automatic speech recognition (ASR). Gaining insight into the complex interaction between clean speech, noise, and noisy reverberant speech features is essential for any ASR system to be robust against noise and reverberation present in distant microphone input signals. The findings are gathered in a probabilistic formulation of an observation model which may be used in model-based feature compensation schemes. The proposed observation model extends previous models in three major directions: First, the contribution of additive background noise to the observation error is explicitly taken into account. Second, an energy compensation constant is introduced which ensures an unbiased estimate of the reverberant speech features, and, third, a recursive variant of the observation model is developed resulting in reduced computational complexity when used in model-based feature compensation. The experimental section is used to evaluate the accuracy of the model and to describe how its parameters can be determined from test data.}},
  author       = {{Leutnant, Volker and Krueger, Alexander and Haeb-Umbach, Reinhold}},
  issn         = {{2329-9290}},
  journal      = {{IEEE/ACM Transactions on Audio, Speech, and Language Processing}},
  keywords     = {{computational complexity, reverberation, speech recognition, automatic speech recognition, background noise, clean speech, energy compensation, logarithmic mel power spectral domain, mel frequency cepstral coefficients, microphone input signals, model-based feature compensation schemes, noisy reverberant speech automatic recognition, noisy reverberant speech features, Atmospheric modeling, Computational modeling, Noise, Noise measurement, Reverberation, Speech, Vectors, Model-based feature compensation, observation model for reverberant and noisy speech, recursive observation model, robust automatic speech recognition}},
  number       = {{1}},
  pages        = {{95--109}},
  title        = {{{A New Observation Model in the Logarithmic Mel Power Spectral Domain for the Automatic Recognition of Noisy Reverberant Speech}}},
  doi          = {{10.1109/TASLP.2013.2285480}},
  volume       = {{22}},
  year         = {{2014}},
}

@article{11867,
  abstract     = {{New waves of consumer-centric applications, such as voice search and voice interaction with mobile devices and home entertainment systems, increasingly require automatic speech recognition (ASR) to be robust to the full range of real-world noise and other acoustic distorting conditions. Despite its practical importance, however, the inherent links between and distinctions among the myriad of methods for noise-robust ASR have yet to be carefully studied in order to advance the field further. To this end, it is critical to establish a solid, consistent, and common mathematical foundation for noise-robust ASR, which is lacking at present. This article is intended to fill this gap and to provide a thorough overview of modern noise-robust techniques for ASR developed over the past 30 years. We emphasize methods that are proven to be successful and that are likely to sustain or expand their future applicability. We distill key insights from our comprehensive overview in this field and take a fresh look at a few old problems, which nevertheless are still highly relevant today. Specifically, we have analyzed and categorized a wide range of noise-robust techniques using five different criteria: 1) feature-domain vs. model-domain processing, 2) the use of prior knowledge about the acoustic environment distortion, 3) the use of explicit environment-distortion models, 4) deterministic vs. uncertainty processing, and 5) the use of acoustic models trained jointly with the same feature enhancement or model adaptation process used in the testing stage. With this taxonomy-oriented review, we equip the reader with the insight to choose among techniques and with the awareness of the performance-complexity tradeoffs. The pros and cons of using different noise-robust ASR techniques in practical application scenarios are provided as a guide to interested practitioners. The current challenges and future research directions in this field is also carefully analyzed.}},
  author       = {{Li, Jinyu and Deng, Li and Gong, Yifan and Haeb-Umbach, Reinhold}},
  issn         = {{2329-9290}},
  journal      = {{IEEE/ACM Transactions on Audio, Speech, and Language Processing}},
  keywords     = {{Speech recognition, compensation, distortion modeling, joint model training, noise, robustness, uncertainty processing}},
  number       = {{4}},
  pages        = {{745--777}},
  title        = {{{An Overview of Noise-Robust Automatic Speech Recognition}}},
  doi          = {{10.1109/TASLP.2014.2304637}},
  volume       = {{22}},
  year         = {{2014}},
}

@article{4425,
  abstract     = {{Designbasierte Forschung zielt darauf, praktische Problemstellungen zu lösen und gleichzeitig wissenschaftliche Theorien (weiter) zu entwickeln. Dabei durchläuft designbasierte Forschung in der Regel die Phasen der Problemdefinition, der Entwicklung eines didaktischen Designs, der zyklischen Design-Implementation sowie der Evaluation und Reflexion in enger Kooperation von Wissenschaft und Praxis. Inwieweit es mittels designbasierter Forschung gelingen kann, wissenschaftliche Gültigkeit und praktische Relevanz von Forschung gleichermassen zu steigern, wird im Beitrag anhand des Konzepts multipler Signifikanzen erörtert. Dabei wird diskutiert, wie die Gestaltung der Kooperation von Forschenden und Praktikern/-innen dazu beitragen kann, die praktische, statistische, klinische und wirtschaftliche Signifikanz des designbasierten Forschungsprozesses sicherzustellen.}},
  author       = {{Brahm, Taiga and Jenert, Tobias}},
  issn         = {{0172-2875}},
  journal      = {{Zeitschrift für Berufs- und Wirtschaftspädagogik-Beihefte (ZBW-B)}},
  keywords     = {{Design-based research, design research, validity, significance, Bildungsforschung, designbasierte Forschung, Wissenschafts-Praxis-Kommunikation}},
  number       = {{Band 27}},
  pages        = {{45--62}},
  publisher    = {{Steiner}},
  title        = {{{Wissenschafts-Praxis-Kooperation in designbasierter Forschung: Im Spannungsfeld zwischen wissenschaftlicher Gültigkeit und praktischer Relevanz}}},
  year         = {{2014}},
}

@inproceedings{10620,
  author       = {{Anwer, Jahanzeb and Meisner, Sebastian and Platzner, Marco}},
  booktitle    = {{2013 International Conference on Reconfigurable Computing and FPGAs (ReConFig)}},
  keywords     = {{fault tolerant computing, field programmable gate arrays, logic design, reliability, BYU-LANL tool, DRM tool flow, FPGA based hardware designs, avionic application, device technologies, dynamic reliability management, fault-tolerant operation, hardware designs, reconfiguring reliability levels, space applications, Field programmable gate arrays, Hardware, Redundancy, Reliability engineering, Runtime, Tunneling magnetoresistance}},
  pages        = {{1--6}},
  title        = {{{Dynamic reliability management: Reconfiguring reliability-levels of hardware designs at runtime}}},
  doi          = {{10.1109/ReConFig.2013.6732280}},
  year         = {{2013}},
}

