@article{34075,
  author       = {{Penner, Eduard and Caylak, Ismail and Mahnken, Rolf}},
  issn         = {{2325-3444}},
  journal      = {{Mathematics and Mechanics of Complex Systems}},
  keywords     = {{Computational Mathematics, Numerical Analysis, Civil and Structural Engineering}},
  number       = {{1}},
  pages        = {{21--50}},
  publisher    = {{Mathematical Sciences Publishers}},
  title        = {{{A polymorphic uncertainty model for the curing process of transversely fiber-reinforced plastics}}},
  doi          = {{10.2140/memocs.2022.10.21}},
  volume       = {{10}},
  year         = {{2022}},
}

@article{31691,
  abstract     = {{Sustainable product engineering is becoming increasingly important. This includes the development of environmentally friendly products and the design for recycling. In this paper a holistic method for the assessment of solution alternatives is presented, in which the stakeholder perspectives along the generic product lifecycle are taken into account. Finally, a new visualization is presented. By visualizing the results in the integrated sustainability triangle, decision-makers in product development can holistically assess the sustainability of the solution alternatives.}},
  author       = {{Gräßler, Iris and Hesse, Philipp}},
  issn         = {{2732-527X}},
  journal      = {{Proceedings of the Design Society}},
  keywords     = {{sustainability, decision making, generic product lifecycle, design analysis, ecodesign}},
  pages        = {{1001--1010}},
  publisher    = {{Cambridge University Press (CUP)}},
  title        = {{{Approach to Sustainability-Based Assessment of Solution Alternatives in Early Stages of Product Engineering}}},
  doi          = {{10.1017/pds.2022.102}},
  volume       = {{2}},
  year         = {{2022}},
}

@article{34677,
  author       = {{Black, Tobias and Wu, Chunyan}},
  issn         = {{0944-2669}},
  journal      = {{Calculus of Variations and Partial Differential Equations}},
  keywords     = {{Applied Mathematics, Analysis}},
  number       = {{3}},
  publisher    = {{Springer Science and Business Media LLC}},
  title        = {{{Prescribed signal concentration on the boundary: eventual smoothness in a chemotaxis-Navier–Stokes system with logistic proliferation}}},
  doi          = {{10.1007/s00526-022-02201-y}},
  volume       = {{61}},
  year         = {{2022}},
}

@inproceedings{56993,
  author       = {{Schaffer, Michael and Budde, Lea and Schulte, Carsten and Buhl, Heike M.}},
  booktitle    = {{52nd DGPs Congress - Abstracts}},
  editor       = {{Bermeitinger, Christina and Greve, Werner}},
  keywords     = {{Cognition, Motivation, Technical Model, Mental Model, Explainer, Explainee, Qualitative Content Analysis}},
  location     = {{Hildesheim}},
  title        = {{{Die Anpassungen von Erklärungen an das Verständnis des Erklärgegenstandes der Gesprächspartner}}},
  year         = {{2022}},
}

@article{50025,
  author       = {{Feng, Yuanhua and Gries, Thomas and Letmathe, Sebastian and Schulz, Dominik}},
  issn         = {{2073-4859}},
  journal      = {{The R Journal}},
  keywords     = {{Statistics, Probability and Uncertainty, Numerical Analysis, Statistics and Probability}},
  number       = {{1}},
  pages        = {{182--195}},
  publisher    = {{The R Foundation}},
  title        = {{{The smoots Package in R for Semiparametric Modeling of Trend Stationary Time Series}}},
  doi          = {{10.32614/rj-2022-017}},
  volume       = {{14}},
  year         = {{2022}},
}

@article{30511,
  abstract     = {{<jats:title>Abstract</jats:title><jats:p>Many critical codebases are written in C, and most of them use preprocessor directives to encode variability, effectively encoding software product lines. These preprocessor directives, however, challenge any static code analysis. SPLlift, a previously presented approach for analyzing software product lines, is limited to Java programs that use a rather simple feature encoding and to analysis problems with a finite and ideally small domain. Other approaches that allow the analysis of real-world C software product lines use special-purpose analyses, preventing the reuse of existing analysis infrastructures and ignoring the progress made by the static analysis community. This work presents <jats:sc>VarAlyzer</jats:sc>, a novel static analysis approach for software product lines. <jats:sc>VarAlyzer</jats:sc> first transforms preprocessor constructs to plain C while preserving their variability and semantics. It then solves any given distributive analysis problem on transformed product lines in a variability-aware manner. <jats:sc>VarAlyzer</jats:sc> ’s analysis results are annotated with feature constraints that encode in which configurations each result holds. Our experiments with 95 compilation units of OpenSSL show that applying <jats:sc>VarAlyzer</jats:sc> enables one to conduct inter-procedural, flow-, field- and context-sensitive data-flow analyses on entire product lines for the first time, outperforming the product-based approach for highly-configurable systems.</jats:p>}},
  author       = {{Schubert, Philipp and Gazzillo, Paul and Patterson, Zach and Braha, Julian and Schiebel, Fabian Benedikt and Hermann, Ben and Wei, Shiyi and Bodden, Eric}},
  issn         = {{0928-8910}},
  journal      = {{Automated Software Engineering}},
  keywords     = {{inter-procedural static analysis, software product lines, preprocessor, LLVM, C/C++}},
  number       = {{1}},
  publisher    = {{Springer Science and Business Media LLC}},
  title        = {{{Static data-flow analysis for software product lines in C}}},
  doi          = {{10.1007/s10515-022-00333-1}},
  volume       = {{29}},
  year         = {{2022}},
}

@article{47627,
  abstract     = {{In Germany, dual learning programmes are increasingly offered by higher education institutions. These programmes’ main characteristic and greatest challenge is their integration of academic and vocational learning. So far, this challenge has frequently been stated without specifying its exact nature and consequences for learners. The present study addresses this pedagogical research gap and examines the extent of variation in the degree of integration among dual study programmes. With reference to curriculum theory, the study develops an empirical typology of curricular integration in dual programmes. The data sample consists of 152 programmes at (dual) universities and universities of applied sciences. Data is analysed using hierarchical cluster analysis. Results indicate that the currently prevailing forms of curricular integration should best be differentiated according to five types. The five overlapping types of integration are located on a continuum ranging from parallelism through organisational linking to full curricular integration targeted immediately at students’ personal integration. The analysis confirms that there are problems with complying with integration standards set on the policy level. Above all, the study offers new insights on what marks the diverse integration landscape of dual study programmes. It proves that approaches to integration are more differentiated than previous research has shown.}},
  author       = {{Mordhorst, Lisa and Jenert, Tobias}},
  issn         = {{0018-1560}},
  journal      = {{Higher Education}},
  keywords     = {{Dual study programmes, Curricular integration, Curriculum theory, Empirical typology, Cluster analysis, Germany}},
  number       = {{6}},
  pages        = {{1257--1279}},
  publisher    = {{Springer Science and Business Media LLC}},
  title        = {{{Curricular integration of academic and vocational education: a theory-based empirical typology of dual study programmes in Germany}}},
  doi          = {{10.1007/s10734-022-00889-7}},
  volume       = {{85}},
  year         = {{2022}},
}

@inproceedings{23388,
  abstract     = {{As one of the most popular programming languages, PYTHON has become a relevant target language for static analysis tools. The primary data structure for performing an inter-procedural static analysis is call-graph (CG), which links call sites to potential call targets in a program. There exists multiple algorithms for constructing callgraphs, tailored to specific languages. However, comparatively few implementations target PYTHON. Moreover, there is still lack of empirical evidence as to how these few algorithms perform in terms of precision and recall. This paper thus presents EVAL_CG, an extensible framework for comparative analysis of Python call-graphs. We conducted two experiments which run the CG algorithms on different Python programming constructs and real-world applications. In both experiments, we evaluate three CG generation frameworks namely, Code2flow, Pyan, and Wala. We record precision, recall, and running time, and identify sources of unsoundness of each framework. Our evaluation shows that none of the current CG construction frameworks produce a sound CG. Moreover, the static CGs contain many spurious edges. Code2flow is also comparatively slow. Hence, further research is needed to support CG generation for Python programs.}},
  author       = {{Kummita, Sriteja and Piskachev, Goran and Spaeth, Johannes and Bodden, Eric}},
  booktitle    = {{Proceedings of the 2021 International Conference on Code Quality (ICCQ)}},
  isbn         = {{978-1-7281-8477-7}},
  keywords     = {{Static Analysis, Callgraph Analysis, Python, Qualitative Analysis, Quantitative Analysis, Empirical Evaluation}},
  location     = {{Virtual}},
  title        = {{{Qualitative and Quantitative Analysis of Callgraph Algorithms for PYTHON}}},
  doi          = {{10.1109/ICCQ51190.2021.9392986}},
  year         = {{2021}},
}

@article{20844,
  abstract     = {{Review papers are essential for knowledge development in IS. While some are cited twice a day, others accumulate single digit citations over a decade. The magnitude of these differences prompts us to analyze what distinguishes those reviews that have proven to be integral to scientific progress from those that might be considered less impactful. Our results highlight differences between reviews aimed at describing, understanding, explaining, and theory testing. Beyond the control variables, they demonstrate the importance of methodological transparency and the development of research agendas. These insights inform all stakeholders involved in the development and publication of review papers.}},
  author       = {{Wagner, Gerit and Prester, Julian and Roche, Maria and Schryen, Guido and Benlian, Alexander and Paré, Guy and Templier, Mathieu}},
  journal      = {{Information \& Management}},
  keywords     = {{Literature review, review papers, scientometric, scientific impact, citation analysis}},
  number       = {{3}},
  title        = {{{Which Factors Affect the Scientific Impact of Review Papers in IS Research? A Scientometric Study}}},
  volume       = {{58}},
  year         = {{2021}},
}

@article{33649,
  author       = {{Kessler, Jan and Calcavecchia, Francesco and Kühne, Thomas}},
  issn         = {{2513-0390}},
  journal      = {{Advanced Theory and Simulations}},
  keywords     = {{Multidisciplinary, Modeling and Simulation, Numerical Analysis, Statistics and Probability}},
  number       = {{4}},
  publisher    = {{Wiley}},
  title        = {{{Artificial Neural Networks as Trial Wave Functions for Quantum Monte Carlo}}},
  doi          = {{10.1002/adts.202000269}},
  volume       = {{4}},
  year         = {{2021}},
}

@article{34818,
  author       = {{Hanusch, Maximilian}},
  issn         = {{0926-2245}},
  journal      = {{Differential Geometry and its Applications}},
  keywords     = {{Geometry and Topology, Analysis}},
  publisher    = {{Elsevier BV}},
  title        = {{{Symmetries of analytic curves}}},
  doi          = {{10.1016/j.difgeo.2020.101687}},
  volume       = {{74}},
  year         = {{2021}},
}

@article{46318,
  abstract     = {{Multi-objective (MO) optimization, i.e., the simultaneous optimization of multiple conflicting objectives, is gaining more and more attention in various research areas, such as evolutionary computation, machine learning (e.g., (hyper-)parameter optimization), or logistics (e.g., vehicle routing). Many works in this domain mention the structural problem property of multimodality as a challenge from two classical perspectives: (1) finding all globally optimal solution sets, and (2) avoiding to get trapped in local optima. Interestingly, these streams seem to transfer many traditional concepts of single-objective (SO) optimization into claims, assumptions, or even terminology regarding the MO domain, but mostly neglect the understanding of the structural properties as well as the algorithmic search behavior on a problem’s landscape. However, some recent works counteract this trend, by investigating the fundamentals and characteristics of MO problems using new visualization techniques and gaining surprising insights. Using these visual insights, this work proposes a step towards a unified terminology to capture multimodality and locality in a broader way than it is usually done. This enables us to investigate current research activities in multimodal continuous MO optimization and to highlight new implications and promising research directions for the design of benchmark suites, the discovery of MO landscape features, the development of new MO (or even SO) optimization algorithms, and performance indicators. For all these topics, we provide a review of ideas and methods but also an outlook on future challenges, research potential and perspectives that result from recent developments.}},
  author       = {{Grimme, Christian and Kerschke, Pascal and Aspar, Pelin and Trautmann, Heike and Preuss, Mike and Deutz, André H. and Wang, Hao and Emmerich, Michael}},
  issn         = {{0305-0548}},
  journal      = {{Computers \& Operations Research}},
  keywords     = {{Multimodal optimization, Multi-objective continuous optimization, Landscape analysis, Visualization, Benchmarking, Theory, Algorithms}},
  pages        = {{105489}},
  title        = {{{Peeking beyond peaks: Challenges and research potentials of continuous multimodal multi-objective optimization}}},
  doi          = {{10.1016/j.cor.2021.105489}},
  volume       = {{136}},
  year         = {{2021}},
}

@inproceedings{48860,
  abstract     = {{In the area of evolutionary computation the calculation of diverse sets of high-quality solutions to a given optimization problem has gained momentum in recent years under the term evolutionary diversity optimization. Theoretical insights into the working principles of baseline evolutionary algorithms for diversity optimization are still rare. In this paper we study the well-known Minimum Spanning Tree problem (MST) in the context of diversity optimization where population diversity is measured by the sum of pairwise edge overlaps. Theoretical results provide insights into the fitness landscape of the MST diversity optimization problem pointing out that even for a population of {$\mu$} = 2 fitness plateaus (of constant length) can be reached, but nevertheless diverse sets can be calculated in polynomial time. We supplement our theoretical results with a series of experiments for the unconstrained and constraint case where all solutions need to fulfill a minimal quality threshold. Our results show that a simple ({$\mu$} + 1)-EA can effectively compute a diversified population of spanning trees of high quality.}},
  author       = {{Bossek, Jakob and Neumann, Frank}},
  booktitle    = {{Proceedings of the Genetic and Evolutionary Computation Conference}},
  isbn         = {{978-1-4503-8350-9}},
  keywords     = {{evolutionary algorithms, evolutionary diversity optimization, minimum spanning tree, runtime analysis}},
  pages        = {{198--206}},
  publisher    = {{Association for Computing Machinery}},
  title        = {{{Evolutionary Diversity Optimization and the Minimum Spanning Tree Problem}}},
  doi          = {{10.1145/3449639.3459363}},
  year         = {{2021}},
}

@inbook{48862,
  abstract     = {{Most runtime analyses of randomised search heuristics focus on the expected number of function evaluations to find a unique global optimum. We ask a fundamental question: if additional search points are declared optimal, or declared as desirable target points, do these additional optima speed up evolutionary algorithms? More formally, we analyse the expected hitting time of a target set OPT {$\cup$} S where S is a set of non-optimal search points and OPT is the set of optima and compare it to the expected hitting time of OPT. We show that the answer to our question depends on the number and placement of search points in S. For all black-box algorithms and all fitness functions we show that, if additional optima are placed randomly, even an exponential number of optima has a negligible effect on the expected optimisation time. Considering Hamming balls around all global optima gives an easier target for some algorithms and functions and can shift the phase transition with respect to offspring population sizes in the (1,{$\lambda$}) EA on One-Max. Finally, on functions where search trajectories typically join in a single search point, turning one search point into an optimum drastically reduces the expected optimisation time.}},
  author       = {{Bossek, Jakob and Sudholt, Dirk}},
  booktitle    = {{Proceedings of the 16th ACM/SIGEVO Conference on Foundations of Genetic Algorithms}},
  isbn         = {{978-1-4503-8352-3}},
  keywords     = {{evolutionary algorithms, pseudo-boolean functions, runtime analysis, theory}},
  pages        = {{1--11}},
  publisher    = {{Association for Computing Machinery}},
  title        = {{{Do Additional Optima Speed up Evolutionary Algorithms?}}},
  year         = {{2021}},
}

@article{48854,
  abstract     = {{We contribute to the theoretical understanding of randomized search heuristics for dynamic problems. We consider the classical vertex coloring problem on graphs and investigate the dynamic setting where edges are added to the current graph. We then analyze the expected time for randomized search heuristics to recompute high quality solutions. The (1+1) Evolutionary Algorithm and RLS operate in a setting where the number of colors is bounded and we are minimizing the number of conflicts. Iterated local search algorithms use an unbounded color palette and aim to use the smallest colors and, consequently, the smallest number of colors. We identify classes of bipartite graphs where reoptimization is as hard as or even harder than optimization from scratch, i.e., starting with a random initialization. Even adding a single edge can lead to hard symmetry problems. However, graph classes that are hard for one algorithm turn out to be easy for others. In most cases our bounds show that reoptimization is faster than optimizing from scratch. We further show that tailoring mutation operators to parts of the graph where changes have occurred can significantly reduce the expected reoptimization time. In most settings the expected reoptimization time for such tailored algorithms is linear in the number of added edges. However, tailored algorithms cannot prevent exponential times in settings where the original algorithm is inefficient.}},
  author       = {{Bossek, Jakob and Neumann, Frank and Peng, Pan and Sudholt, Dirk}},
  issn         = {{0178-4617}},
  journal      = {{Algorithmica}},
  keywords     = {{Dynamic optimization, Evolutionary algorithms, Running time analysis}},
  number       = {{10}},
  pages        = {{3148--3179}},
  title        = {{{Time Complexity Analysis of Randomized Search Heuristics for the Dynamic Graph Coloring Problem}}},
  doi          = {{10.1007/s00453-021-00838-3}},
  volume       = {{83}},
  year         = {{2021}},
}

@article{28696,
  abstract     = {{The aim of the present study is to bring new momentum into research on students’
understanding of academic writing. Drawing on the idea that metaphors give insight into
implicit conceptions of abstract entities and processes, we developed a detailed and
differentiated set of conceptual metaphors that can be used to study student ideas about
writing in research, teaching, and interventions. A large sample of undergraduates produced
their everyday understanding of writing in short texts beginning with a self-generated
metaphor. Based on theories from cognitive linguistics, the conceptual metaphors in their
texts were analyzed in terms of their action quality (transitivity) and spatiality (spatial
primitives). The undergraduates’ conceptualizations were very heterogeneous. Most
metaphors depart strongly from scientific approaches to academic writing within cognitive
psychology and sociocultural theory. Roughly half of the metaphors could be collated to one
of four metaphor systems. Depending on the desired degree of abstraction or concreteness,
conceptual metaphors or metaphor systems can be employed in further studies to illuminate
thinking about writing.}},
  author       = {{Scharlau, Ingrid and Karsten, Andrea and Rohlfing, Katharina}},
  issn         = {{2030-1006}},
  journal      = {{Journal of Writing Research}},
  keywords     = {{metaphor analysis, academic writing, transitivity, spatial primitives}},
  number       = {{3}},
  pages        = {{493--529}},
  title        = {{{Building, emptying out, or dreaming? Action structures and space in undergraduates’ metaphors of academic writing}}},
  doi          = {{10.17239/jowr-2021.12.03.01}},
  volume       = {{12}},
  year         = {{2021}},
}

@article{27841,
  abstract     = {{Verification of software and processor hardware usually proceeds separately, software analysis relying on the correctness of processors executing machine instructions. This assumption is valid as long as the software runs on standard CPUs that have been extensively validated and are in wide use. However, for processors exploiting custom instruction set extensions to meet performance and energy constraints the validation might be less extensive, challenging the correctness assumption. In this paper we present a novel formal approach for hardware/software co-verification targeting processors with custom instruction set extensions. We detail two different approaches for checking whether the hardware fulfills the requirements expected by the software analysis. The approaches are designed to explore a trade-off between generality of the verification and computational effort. Then, we describe the integration of software and hardware analyses for both techniques and describe a fully automated tool chain implementing the approaches. Finally, we demonstrate and compare the two approaches on example source code with custom instructions, using state-of-the-art software analysis and hardware verification techniques.}},
  author       = {{Jakobs, Marie-Christine and Pauck, Felix and Platzner, Marco and Wehrheim, Heike and Wiersema, Tobias}},
  journal      = {{IEEE Access}},
  keywords     = {{Software Analysis, Abstract Interpretation, Custom Instruction, Hardware Verification}},
  publisher    = {{IEEE}},
  title        = {{{Software/Hardware Co-Verification for Custom Instruction Set Processors}}},
  doi          = {{10.1109/ACCESS.2021.3131213}},
  year         = {{2021}},
}

@inproceedings{24280,
  abstract     = {{Challenges in decisions on technical changes are the lack of knowledge about the expected impact and change propagation. Currently, no literature study contains a systematic differentiation and evaluation of existing approaches, which is a prerequisite for practitioners to select a suitable approach. This research aims at defining differentiation criteria as well as generally applicable requirements for evaluation. A four-step approach is used: systematic literature review on approaches for impact analysis of engineering changes (1), categorization and prioritization of approaches based on reoccuring elements (2), derivation of context specific requirements for evaluation (3), and evaluation of approaches (4). The result indicates existing potential of object-oriented modeling approaches.}},
  author       = {{Gräßler, Iris and Wiechel, Dominik}},
  booktitle    = {{DS 111: Proceedings of the 32nd Symposium Design for X}},
  editor       = {{Krause, Dieter and Paetzold, Kristin and Wartzack, Sandro}},
  keywords     = {{Engineering Change Management, Impact Analysis, Engineering Changes, Model-based Systems Engineering, Product Development}},
  location     = {{Tutzing}},
  title        = {{{Systematische Bewertung von Auswirkungsanalysen des Engineering Change Managements}}},
  doi          = {{10.35199/dfx2021.12}},
  year         = {{2021}},
}

@inbook{57884,
  abstract     = {{Although music apps are becoming increasingly popular, there has been little research on informal music practices with apps. This article presents findings of an ongoing study on learning processes and aesthetic experiences with informal appmusic practices. In particular, it discusses the aesthetic practices (Reckwitz, 2008b) of using specific places for making music. In our grounded theory study (Charmaz, 2014) we collected data using interviews, participant observation and videography. As exemplary cases, this article presents two analyses of the use of ‘inspiring places’ and ‘safe places’. The results suggest that perceiving the atmosphere is a fundamental prerequisite for both places. Additionally, the results shed light on aesthetic aspects of mobile music making. (DIPF/Orig.)}},
  author       = {{Eusterbrock, Linus and Godau, Marc and Haenisch, Matthias and Krebs, Matthias and Rolle, Christian}},
  booktitle    = {{Musikpädagogik im Spannungsfeld von Reflexion und Intervention}},
  editor       = {{Hasselhorn, Johannes and Kautny, Oliver and Platz, Friedrich}},
  keywords     = {{Education, Ästhetik, Schul- und Bildungswesen, Informal learning, Informelles Lernen, Musical education, Musikpädagogik, Anwendung, Ästhetische Erfahrung, Grounded Theory, Längsschnittuntersuchung, Learning process, Lernprozess, Longitudinal analysis, Longitudinal study, Mobiles Gerät, Music reading, Musizieren, Erziehung}},
  pages        = {{155--172}},
  publisher    = {{Waxmann}},
  title        = {{{Von ’inspirierenden Orten’ und ’Safe Places’. Die ästhetische Nutzung von Orten in der Appmusikpraxis}}},
  volume       = {{41}},
  year         = {{2021}},
}

@techreport{17019,
  abstract     = {{The scientific impact of research papers is multi-dimensional and can be determined quantitatively by means of citation analysis and qualitatively by means of content analysis. Accounting for the widely acknowledged limitations of pure citation analysis, we adopt a knowledge-based perspective on scientific impact to develop a methodology for content-based citation analysis which allows determining how papers have enabled knowledge development in subsequent research (knowledge impact). As knowledge development differs between research genres, we develop a new knowledgebased citation analysis methodology for the genre of standalone literature reviews (LRs). We apply the suggested methodology to the IS business value domain by manually coding 22 LRs and 1,228 citing papers (CPs) and show that the results challenge the assumption that citations indicate knowledge impact. We derive implications for distinguishing knowledge impact from citation impact in the LR genre. Finally, we develop recommendations for authors of LRs, scientific evaluation committees and editorial boards of journals how to apply and benefit from the suggested methodology, and we discuss its efficiency and automatization.}},
  author       = {{Schryen, Guido and Wagner, Gerit and Benlian, Alexander}},
  keywords     = {{Scientific impact, knowledge impact, content-based citation analysis, methodology}},
  title        = {{{Distinguishing Knowledge Impact from Citation Impact: A Methodology for Analysing Knowledge Impact for the Literature Review Genre}}},
  year         = {{2020}},
}

