@article{25046,
  abstract     = {{While increasing digitalization enables multiple advantages for the reliable operation of technical systems, a remaining challenge in the context of condition monitoring is the suitable consideration of uncertainties affecting the monitored system. Therefore, a suitable prognostic approach to predict the remaining useful lifetime of complex technical systems is required. To handle different kinds of uncertainties, a novel Multi-Model-Particle Filtering-based prognostic approach is developed and evaluated on the use case of rubber-metal-elements. These elements are maintained preventively due to the strong influence of uncertainties on their behavior. In this paper, two measurement quantities are compared concerning their ability to establish a prediction of the remaining useful lifetime of the monitored elements and the influence of present uncertainties. The results are evaluated based on three performance indices. A comparison with predictions of a classical Particle Filter underlines the superiority of the developed Multi-Model-Particle Filter. Finally, the value of the developed method for enabling condition monitoring of technical systems subject to uncertainties is illustrated by a comparison between the preventive and the predictive maintenance strategy for the use case.}},
  author       = {{Bender, Amelie}},
  issn         = {{2075-1702}},
  journal      = {{Machines}},
  keywords     = {{prognostics, RUL predictions, particle filter, uncertainty consideration, Multi-Model-Particle Filter, model-based approach, rubber-metal-elements, predictive maintenance}},
  number       = {{10}},
  title        = {{{A Multi-Model-Particle Filtering-Based Prognostic Approach to Consider Uncertainties in RUL Predictions}}},
  doi          = {{10.3390/machines9100210}},
  volume       = {{9}},
  year         = {{2021}},
}

@article{35202,
  abstract     = {{Purpose: This study aims at investigating how digitalisation (in the sense of Industry 4.0) has changed the work of farmers and how they experience the change from more traditional work to digitalised agriculture. It also investigates what knowledge farmers require on digitalised farms and how they acquire it. Dairy farming was used as the domain of investigation since, unlike many other industries, it has been strongly affected by digitalisation in recent years.

Method: Exploratory interviews with 10 livestock farmers working on digitalised dairy farms were analysed using qualitative content analysis. A deductive and inductive coding strategy was used. 

Findings: Farming work has changed from more manual tasks towards symbol manipulation and data processing. Farmers must be able to use computers and other digital devices to retrieve and analyse sensor data that allow them to monitor and control the processes on their farm. For this new kind of work, farmers require elaborated mental models that link traditional farming knowledge with knowledge about digital systems, including a strong understanding of the production processes underlying their farm. Learning is mostly based on instructions offered by manufacturers of the new technology as well as on informal and non-formal learning modes. Even younger farmers report that digital technology was not sufficiently covered in their (vocational) degrees. In general, farmers emphasise the positive effects of digitalisation on both their working and their private lives.

Conclusions: Farmers should be aware of the opportunities as well as the potential drawbacks of the digitalisation of work processes in agriculture. Providers of agricultural education (like vocational schools or training institutes) need to incorporate the knowledge and skills required to work in digitalised environments (e.g., data literacy) into their syllabi. Further studies are required to assess how digitalisation changes farming practices and what knowledge and skills linked to these developments will be required in the future.}},
  author       = {{Goller, Michael and Caruso, Carina and Harteis, Christian}},
  issn         = {{2197-8646}},
  journal      = {{International Journal for Research in Vocational Education and Training}},
  keywords     = {{Work-Based Learning, Organisational Change, Digital Competences, Qualitative Research, Digitalisation, Farming, Dairy, VET, Vocational Education and Training}},
  number       = {{2}},
  pages        = {{208--223}},
  title        = {{{Digitalisation in Agriculture: Knowledge and Learning Requirements of German Dairy Farmers}}},
  doi          = {{10.13152/IJRVET.8.2.4}},
  volume       = {{8}},
  year         = {{2021}},
}

@techreport{37136,
  abstract     = {{This study examines the relation between voluntary audit and the cost of debt in private firms. We use a sample of 4,058 small private firms operating in the period 2006-2017 that are not subject to mandatory audits. Firms opt for a voluntary audit of financial statements either because the economic setting in which they operate effectively forces them to do so (e.g., ownership complexity, export-oriented supply chain, subsidiary status) or because firm fundamentals and/or financial reporting practices limit their access to financial debt, both reflected in earnings quality. We use these factors to model the decision for a voluntary audit. In the outcome analyses, we find robust evidence that voluntary audits are associated with interest rates that are higher, rather than lower, by up to 3.0 percentage points. This effect is present regardless of the perceived audit quality (Big-4 vs. non-Big-4), but is stronger for non-Big-4 audits, where auditees have a stronger position relative to auditors. Audited firms’ earnings are less informative about future operating performance relative to unaudited counterparts. We conclude that voluntary audits facilitate access to financial debt for firms with higher risk that may otherwise have no access to this form of financing. The price paid is reflected in the higher interest rates charged to firms with voluntary audits – firms with higher information and/or fundamental risk.}},
  author       = {{Ichev, Riste and Koren, Jernej and Kosi, Urska and Sitar Sustar, Katarina and Valentincic, Aljosa}},
  keywords     = {{private firms, voluntary audit, cost of debt, self-selection bias, risk}},
  title        = {{{Cost of Debt for Private Firms Revisited: Voluntary Audits as a Reflection of Risk}}},
  year         = {{2021}},
}

@inproceedings{24280,
  abstract     = {{Challenges in decisions on technical changes are the lack of knowledge about the expected impact and change propagation. Currently, no literature study contains a systematic differentiation and evaluation of existing approaches, which is a prerequisite for practitioners to select a suitable approach. This research aims at defining differentiation criteria as well as generally applicable requirements for evaluation. A four-step approach is used: systematic literature review on approaches for impact analysis of engineering changes (1), categorization and prioritization of approaches based on recurring elements (2), derivation of context-specific requirements for evaluation (3), and evaluation of approaches (4). The results indicate the potential of object-oriented modeling approaches.}},
  author       = {{Gräßler, Iris and Wiechel, Dominik}},
  booktitle    = {{DS 111: Proceedings of the 32nd Symposium Design for X}},
  editor       = {{Krause, Dieter and Paetzold, Kristin and Wartzack, Sandro}},
  keywords     = {{Engineering Change Management, Impact Analysis, Engineering Changes, Model-based Systems Engineering, Product Development}},
  location     = {{Tutzing}},
  title        = {{{Systematische Bewertung von Auswirkungsanalysen des Engineering Change Managements}}},
  doi          = {{10.35199/dfx2021.12}},
  year         = {{2021}},
}

@article{25212,
  abstract     = {{Finding a good query plan is key to the optimization of query runtime. This holds in particular for cost-based federation engines, which make use of cardinality estimations to achieve this goal. A number of studies compare SPARQL federation engines across different performance metrics, including query runtime, result set completeness and correctness, number of sources selected and number of requests sent. Albeit informative, these metrics are generic and unable to quantify and evaluate the accuracy of the cardinality estimators of cost-based federation engines. To thoroughly evaluate cost-based federation engines, the effect of estimated cardinality errors on the overall query runtime performance must be measured. In this paper, we address this challenge by presenting novel evaluation metrics targeted at a fine-grained benchmarking of cost-based federated SPARQL query engines. We evaluate five cost-based federated SPARQL query engines using existing as well as novel evaluation metrics by using LargeRDFBench queries. Our results provide a detailed analysis of the experimental outcomes that reveal novel insights, useful for the development of future cost-based federated SPARQL query processing engines.}},
  author       = {{Qudus, Umair and Saleem, Muhammad and Ngonga Ngomo, Axel-Cyrille and Lee, Young-Koo}},
  issn         = {{2210-4968}},
  journal      = {{Semantic Web}},
  keywords     = {{SPARQL, benchmarking, cost-based, cost-free, federated, querying}},
  number       = {{6}},
  pages        = {{843--868}},
  publisher    = {{IOS Press}},
  title        = {{{An Empirical Evaluation of Cost-based Federated SPARQL Query Processing Engines}}},
  doi          = {{10.3233/SW-200420}},
  volume       = {{12}},
  year         = {{2021}},
}

@inproceedings{19606,
  abstract     = {{Mobile shopping apps have been using Augmented Reality (AR) in recent years to place their products in the environment of the customer. While this is possible with atomic 3D objects, there is still a lack of runtime configuration of 3D object compositions based on user needs and environmental constraints. For this, we previously developed an approach for model-based AR-assisted product configuration based on the concept of Dynamic Software Product Lines. In this demonstration paper, we present the corresponding tool support ProConAR in the form of a Product Modeler and a Product Configurator. While the Product Modeler is an Angular web app that splits products (e.g. a table) up into atomic parts (e.g. tabletop, table legs) and saves them within a configuration model, the Product Configurator is an Android client that uses the configuration model to place different product configurations within the environment of the customer. We show technical details of our ready-to-use tool-chain ProConAR by describing its implementation and usage as well as pointing out future research directions.}},
  author       = {{Gottschalk, Sebastian and Yigitbas, Enes and Schmidt, Eugen and Engels, Gregor}},
  booktitle    = {{Human-Centered Software Engineering. HCSE 2020}},
  editor       = {{Bernhaupt, Regina and Ardito, Carmelo and Sauer, Stefan}},
  keywords     = {{Product Configuration, Augmented Reality, Model-based, Tool Support}},
  location     = {{Eindhoven}},
  publisher    = {{Springer}},
  title        = {{{ProConAR: A Tool Support for Model-based AR Product Configuration}}},
  doi          = {{10.1007/978-3-030-64266-2_14}},
  volume       = {{12481}},
  year         = {{2020}},
}

@inproceedings{16934,
  abstract     = {{To build successful products, the developers have to adapt their product features and business models to uncertain customer needs. This adaptation is part of the research discipline of Hypotheses Engineering (HE) where customer needs can be seen as hypotheses that need to be tested iteratively by conducting experiments together with the customer. So far, modeling support and associated traceability of this iterative process are missing. Both, in turn, are important to document the adaptation to the customer needs and identify experiments that provide most evidence to the customer needs. To target this issue, we introduce a model-based HE approach with a twofold contribution: First, we develop a modeling language that models hypotheses and experiments as interrelated hierarchies together with a mapping between them. While the hypotheses are labeled with a score level of their current evidence, the experiments are labeled with a score level of maximum evidence that can be achieved during conduction. Second, we provide an iterative process to determine experiments that offer the most evidence improvement to the modeled hypotheses. We illustrate the usefulness of the approach with an example of testing the business model of a mobile application.}},
  author       = {{Gottschalk, Sebastian and Yigitbas, Enes and Engels, Gregor}},
  booktitle    = {{Business Modeling and Software Design}},
  editor       = {{Shishkov, Boris}},
  keywords     = {{Hypothesis Engineering, Model-based, Customer Need Adaptation, Business Model, Product Features}},
  location     = {{Potsdam}},
  pages        = {{276--286}},
  publisher    = {{Springer International Publishing}},
  title        = {{{Model-based Hypothesis Engineering for Supporting Adaptation to Uncertain Customer Needs}}},
  doi          = {{10.1007/978-3-030-52306-0_18}},
  volume       = {{391}},
  year         = {{2020}},
}

@techreport{17019,
  abstract     = {{The scientific impact of research papers is multi-dimensional and can be determined quantitatively by means of citation analysis and qualitatively by means of content analysis. Accounting for the widely acknowledged limitations of pure citation analysis, we adopt a knowledge-based perspective on scientific impact to develop a methodology for content-based citation analysis, which allows determining how papers have enabled knowledge development in subsequent research (knowledge impact). As knowledge development differs between research genres, we develop a new knowledge-based citation analysis methodology for the genre of standalone literature reviews (LRs). We apply the suggested methodology to the IS business value domain by manually coding 22 LRs and 1,228 citing papers (CPs) and show that the results challenge the assumption that citations indicate knowledge impact. We derive implications for distinguishing knowledge impact from citation impact in the LR genre. Finally, we develop recommendations for authors of LRs, scientific evaluation committees and editorial boards of journals on how to apply and benefit from the suggested methodology, and we discuss its efficiency and automatization.}},
  author       = {{Schryen, Guido and Wagner, Gerit and Benlian, Alexander}},
  keywords     = {{Scientific impact, knowledge impact, content-based citation analysis, methodology}},
  title        = {{{Distinguishing Knowledge Impact from Citation Impact: A Methodology for Analysing Knowledge Impact for the Literature Review Genre}}},
  year         = {{2020}},
}

@inproceedings{15580,
  abstract     = {{This paper deals with aspect phrase extraction and classification in sentiment analysis. We summarize current approaches and datasets from the domain of aspect-based sentiment analysis, which detects sentiments expressed for individual aspects in unstructured text data. So far, mainly commercial user reviews for products or services such as restaurants have been investigated. Here, we present our dataset consisting of German physician reviews, a sensitive and linguistically complex domain. Furthermore, we describe the annotation process of a dataset for supervised learning with neural networks. Moreover, we introduce our model for extracting and classifying aspect phrases in one step, which obtains an F1-score of 80%. Applied to this more complex domain, our approach outperforms previous approaches.}},
  author       = {{Kersting, Joschka and Geierhos, Michaela}},
  booktitle    = {{Proceedings of the 12th International Conference on Agents and Artificial Intelligence (ICAART 2020) -- Special Session on Natural Language Processing in Artificial Intelligence (NLPinAI 2020)}},
  keywords     = {{Deep Learning, Natural Language Processing, Aspect-based Sentiment Analysis}},
  location     = {{Valletta, Malta}},
  pages        = {{391--400}},
  publisher    = {{SCITEPRESS}},
  title        = {{{Aspect Phrase Extraction in Sentiment Analysis with Deep Learning}}},
  year         = {{2020}},
}

@article{11946,
  abstract     = {{Literature reviews (LRs) play an important role in the development of domain knowledge in all fields. Yet, we observe a lack of insights into the activities with which LRs actually develop knowledge. To address this important gap, we (1) derive knowledge building activities from the extant literature on LRs, (2) suggest a knowledge-based typology of LRs that complements existing typologies, and (3) apply the suggested typology in an empirical study that explores how LRs with different goals and methodologies have contributed to knowledge development. The analysis of 240 LRs published in 40 renowned IS journals between 2000 and 2014 allows us to draw a detailed picture of knowledge development achieved by one of the most important genres in the IS field. An overarching contribution of our work is to unify extant conceptualizations of LRs by clarifying and illustrating how LRs apply different methodologies in a range of knowledge building activities to achieve their goals with respect to theory.}},
  author       = {{Schryen, Guido and Wagner, Gerit and Benlian, Alexander and Paré, Guy}},
  issn         = {{1529-3181}},
  journal      = {{Communications of the AIS}},
  keywords     = {{Literature review, knowledge development, knowledge building activities, knowledge-based typology, information systems research}},
  pages        = {{134--186}},
  title        = {{{A Knowledge Development Perspective on Literature Reviews: Validation of a New Typology in the IS Field}}},
  doi          = {{10.17705/1CAIS.04607}},
  volume       = {{46}},
  year         = {{2020}},
}

@inproceedings{48850,
  abstract     = {{Sequential model-based optimization (SMBO) approaches are algorithms for solving problems that require computationally or otherwise expensive function evaluations. The key design principle of SMBO is a substitution of the true objective function by a surrogate, which is used to propose the point(s) to be evaluated next. SMBO algorithms are intrinsically modular, leaving the user with many important design choices. Significant research efforts go into understanding which settings perform best for which type of problems. Most works, however, focus on the choice of the model, the acquisition function, and the strategy used to optimize the latter. The choice of the initial sampling strategy, however, receives much less attention. Not surprisingly, quite diverging recommendations can be found in the literature. We analyze in this work how the size and the distribution of the initial sample influences the overall quality of the efficient global optimization (EGO) algorithm, a well-known SMBO approach. While, overall, small initial budgets using Halton sampling seem preferable, we also observe that the performance landscape is rather unstructured. We furthermore identify several situations in which EGO performs unfavorably against random sampling. Both observations indicate that an adaptive SMBO design could be beneficial, making SMBO an interesting test-bed for automated algorithm design.}},
  author       = {{Bossek, Jakob and Doerr, Carola and Kerschke, Pascal}},
  booktitle    = {{Proceedings of the Genetic and Evolutionary Computation Conference}},
  isbn         = {{978-1-4503-7128-5}},
  keywords     = {{continuous black-box optimization, design of experiments, initial design, sequential model-based optimization}},
  pages        = {{778--786}},
  publisher    = {{Association for Computing Machinery}},
  title        = {{{Initial Design Strategies and Their Effects on Sequential Model-Based Optimization: An Exploratory Case Study Based on BBOB}}},
  doi          = {{10.1145/3377930.3390155}},
  year         = {{2020}},
}

@inproceedings{48897,
  abstract     = {{In this work we focus on the well-known Euclidean Traveling Salesperson Problem (TSP) and two highly competitive inexact heuristic TSP solvers, EAX and LKH, in the context of per-instance algorithm selection (AS). We evolve instances on which the solvers show strongly different performance profiles. These instances serve as a basis for an exploratory study on the identification of well-discriminating problem characteristics (features). Our results in a nutshell: we show that even though (1) promising features exist, (2) these are in line with previous results from the literature, and (3) models trained with these features are more accurate than models adopting sophisticated feature selection methods, the advantage is not close to the virtual best solver in terms of penalized average runtime, and neither is the performance gain over the single best solver. However, we show that a feature-free approach based on a deep neural network, relying solely on a visual representation of the instances, already matches classical AS model results and thus shows huge potential for future studies.}},
  author       = {{Seiler, Moritz and Pohl, Janina and Bossek, Jakob and Kerschke, Pascal and Trautmann, Heike}},
  booktitle    = {{Parallel Problem Solving from {Nature} (PPSN XVI)}},
  isbn         = {{978-3-030-58111-4}},
  keywords     = {{Automated algorithm selection, Deep learning, Feature-based approaches, Traveling Salesperson Problem}},
  pages        = {{48--64}},
  publisher    = {{Springer-Verlag}},
  title        = {{{Deep Learning as a Competitive Feature-Free Approach for Automated Algorithm Selection on the Traveling Salesperson Problem}}},
  doi          = {{10.1007/978-3-030-58112-1_4}},
  year         = {{2020}},
}

@inproceedings{59220,
  author       = {{Schwabe, Tobias and Balke, Axel and Bezuidenhout, Petrone H. and Reker, Julia and Meyers, Thorsten and Joubert, Trudi-Heleen and Hilleringmann, Ulrich}},
  booktitle    = {{Fifth Conference on Sensors, MEMS, and Electro-Optic Systems}},
  editor       = {{du Plessis, Monuko}},
  keywords     = {{sensing, zinc oxide, thin-film transistor, oxygen measurement, low-cost electronics, water quality analysis, printable electronics, flexible electronics}},
  pages        = {{1104316}},
  publisher    = {{SPIE}},
  title        = {{{Oxygen detection with zinc oxide nanoparticle structures}}},
  doi          = {{10.1117/12.2501507}},
  volume       = {{11043}},
  year         = {{2019}},
}

@article{4947,
  abstract     = {{Manufacturers increasingly integrate information and communication technologies into their products so that they can provide IT-based services. Organizations that formerly concentrated on transactional sales thus confront a new challenge associated with managing service usage—retention and extracting value from investments in smart technology. This study combines a marketing and an information systems perspective in a field study conducted jointly with a large European car manufacturer. Understanding the renewal decision for IT-based service contracts requires knowledge from both disciplines. The paper shows that combining behavioral predictor variables stemming from marketing research and technology-related perceptual variables stemming from technology acceptance research increases the explanatory power and prediction accuracy of forecasting models for customer renewal decisions. Specifically, the authors show that perceptions of usefulness become more important the longer customers use IT-based services and the more services they use within the service contract.}},
  author       = {{Wangenheim, Florian v. and Wünderlich, Nancy and Schumann, Jan H}},
  journal      = {{Journal of Business Research}},
  keywords     = {{IT-based service, Smart services, Contract renewal, Retention, Customer churn, Free trial}},
  volume       = {{79}},
  pages        = {{181--188}},
  publisher    = {{Elsevier}},
  title        = {{{Renew or cancel? Drivers of customer renewal decisions for IT-based service contracts}}},
  year         = {{2017}},
}

@article{9978,
  abstract     = {{Piezoelectric transducers are used in a wide range of applications. Reliability of these transducers is an important aspect in their application. Prognostics, which involve continuous monitoring of the health of technical systems and using this information to estimate the current health state and consequently predict the remaining useful lifetime (RUL), can be used to increase the reliability, safety, and availability of the transducers. This is achieved by utilizing the health state and RUL predictions to adaptively control the usage of the components or to schedule appropriate maintenance without interrupting operation. In this work, a prognostic approach utilizing self-sensing, where electric signals of a piezoelectric transducer are used as the condition monitoring data, is proposed. The approach involves training machine learning algorithms to model the degradation of the transducers through a health index and the use of the learned model to estimate the health index of similar transducers. The current health index is then used to estimate RUL of test components. The feasibility of the approach is demonstrated using piezoelectric bimorphs and the results show that the method is accurate in predicting the health index and RUL.}},
  author       = {{Kimotho, James Kuria and Sextro, Walter and Hemsel, Tobias}},
  journal      = {{IEEE Transactions on Reliability}},
  pages        = {{1--10}},
  title        = {{{Estimation of Remaining Useful Lifetime of Piezoelectric Transducers Based on Self-Sensing}}},
  doi          = {{10.1109/TR.2017.2710260}},
  year         = {{2017}},
}

@inproceedings{10676,
  author       = {{Ho, Nam and Kaufmann, Paul and Platzner, Marco}},
  booktitle    = {{2017 International Conference on Field Programmable Technology (ICFPT)}},
  keywords     = {{Linux, cache storage, microprocessor chips, multiprocessing systems, LEON3-Linux based multicore processor, MiBench suite, block sizes, cache adaptation, evolvable caches, memory-to-cache-index mapping function, processor caches, reconfigurable cache mapping optimization, reconfigurable hardware technology, replacement strategies, standard Linux OS, Hardware, Indexes, Measurement, Multicore processing, Optimization, Training}},
  pages        = {{215--218}},
  title        = {{{Evolvable caches: Optimization of reconfigurable cache mappings for a LEON3/Linux-based multi-core processor}}},
  doi          = {{10.1109/FPT.2017.8280144}},
  year         = {{2017}},
}

@inproceedings{10780,
  author       = {{Guettatfi, Zakarya and Hübner, Philipp and Platzner, Marco and Rinner, Bernhard}},
  booktitle    = {{12th International Symposium on Reconfigurable Communication-centric Systems-on-Chip (ReCoSoC)}},
  keywords     = {{embedded systems, image sensors, power aware computing, wireless sensor networks, Zynq-based VSN node prototype, computational self-awareness, design approach, platform levels, power consumption, visual sensor networks, visual sensor nodes, Cameras, Hardware, Middleware, Multicore processing, Operating systems, Runtime, Reconfigurable platforms, distributed embedded systems, performance-resource trade-off, self-awareness}},
  pages        = {{1--8}},
  title        = {{{Computational self-awareness as design approach for visual sensor nodes}}},
  doi          = {{10.1109/ReCoSoC.2017.8016147}},
  year         = {{2017}},
}

@article{36481,
  abstract     = {{Recent studies highlight early childhood teachers’ mathematics-related competence. Developing this competence should be a main aspect of early childhood teachers’ education. This is, however, not the case in all countries. Consequently, high-quality professional development courses are needed. Based on research results, we developed a competence-oriented continuous professional development course ("EmMa") and examined the effects of "EmMa" by asking: How does "EmMa" affect the development of early childhood teachers’ i) mathematical content knowledge, ii) mathematical pedagogical content knowledge and iii) beliefs towards mathematics in general? To answer these questions, we conducted a pre-test/post-test study including a control group with 99 in-service early childhood teachers. Results show that the course affected teachers’ mathematical pedagogical content knowledge and static orientation towards mathematics positively. From this we conclude that scaling up "EmMa" might be a suitable approach to bridge the gap between pre-service education with nearly no mathematics and the challenges of early mathematics education.}},
  author       = {{Bruns, Julia and Eichen, Lars and Gasteiger, Hedwig}},
  journal      = {{Mathematics Teacher Education and Development (MTED)}},
  keywords     = {{Beliefs, Competency Based Teacher Education, Control Groups, Early Childhood Education, Faculty Development, Foreign Countries, Inservice Teacher Education, Intervention, Mathematical Aptitude, Mathematics Skills, Pedagogical Content Knowledge, Preschool Teachers, Pretests Posttests, Professional Continuing Education, Statistical Analysis, Teacher Competency Testing}},
  number       = {{3}},
  pages        = {{76--93}},
  title        = {{{Mathematics-related Competence of Early Childhood Teachers Visiting a Continuous Professional Development Course: An Intervention Study}}},
  volume       = {{19}},
  year         = {{2017}},
}

@inproceedings{191,
  abstract     = {{One purpose of requirement refinement is to translate higher-level requirements into something usable by developers. Since customer requirements are often written in natural language by end users, they lack precision, completeness and consistency. Although user stories are often used in the requirement elicitation process to describe the possible ways of interacting with the software, there is always something unspoken. Here, we present techniques for automatically refining vague software descriptions. Thus, we can bridge the gap by first revising natural language utterances from higher-level to more detailed customer requirements, before functionality matters. We therefore focus on the resolution of semantically incomplete user-generated sentences (i.e. non-instantiated arguments of predicates) and provide ontology-based gap-filling suggestions on how to complete unverbalized information in the user’s demand.}},
  author       = {{Geierhos, Michaela and Bäumer, Frederik Simon}},
  booktitle    = {{Proceedings of the 21st International Conference on Applications of Natural Language to Information Systems (NLDB)}},
  editor       = {{Métais, Elisabeth and Meziane, Farid and Saraee, Mohamad and Sugumaran, Vijayan and Vadera, Sunil}},
  isbn         = {{978-3-319-41753-0}},
  keywords     = {{Requirement refinement, Concept expansion, Ontology-based instantiation of predicate-argument structure}},
  location     = {{Salford, UK}},
  pages        = {{37--47}},
  publisher    = {{Springer}},
  title        = {{{How to Complete Customer Requirements: Using Concept Expansion for Requirement Refinement}}},
  doi          = {{10.1007/978-3-319-41754-7_4}},
  volume       = {{9612}},
  year         = {{2016}},
}

@article{4586,
  abstract     = {{This study examines the loan-pricing behavior of German banks for a large variety of retail and corporate loan products. We find that a bank’s operational efficiency is priced in bank loan rates and alters interest-setting behavior. Specifically, we establish that a higher degree of operational efficiency leads to lower loan markups, which makes prices more competitive and smoothes the setting of interest rates. By employing state-of-the-art stochastic frontier efficiency measures to capture a bank’s operational efficiency, we take a look at the bank customers’ perspective and demonstrate the extent to which borrowers benefit from cost-efficient banking.}},
  author       = {{Schlueter, Tobias and Busch, Ramona and Sievers, Soenke and Hartmann-Wendels, Thomas}},
  journal      = {{Credit and Capital Markets--Kredit und Kapital}},
  keywords     = {{interest rate pass-through models, error correction models, bank efficiency, cost efficiency, stochastic frontier analysis}},
  number       = {{1}},
  pages        = {{93--125}},
  title        = {{{Loan Pricing: Do Borrowers Benefit from Cost-Efficient Banking?}}},
  doi          = {{10.3790/ccm.49.1.93}},
  volume       = {{49}},
  year         = {{2016}},
}

