@article{61463,
  abstract     = {{Networked knowledge is a central learning objective of university study, especially in the interdisciplinary subject of comparative literature (Komparatistik). To support the development of networked knowledge among bachelor's students of comparative literature, this project employed portfolio work, a method of self-directed and self-reflective learning that appears well suited to motivating engagement with learning content and contributing to students' competency development. Content-related effects and methodological developments were captured through unstructured observations of the portfolio work in the seminar. Concrete connections between learning contents were identified through partly quantitative, predominantly qualitative content analyses of the portfolios. The exploratory study shows changed perspectives and newly awakened interests among the students as a result of the portfolio work, as well as diverse contextualizations, comparisons, and linkages in the portfolios, and thereby offers a possible starting point for structural recommendations for the subject of comparative literature.}},
  author       = {{Hannebohm, Ronja}},
  issn         = {{2199-8825}},
  journal      = {{die hochschullehre: Interdisziplinäre Zeitschrift für Studium und Lehre}},
  keywords     = {{Portfolioarbeit, portfolio work, vernetztes Wissen, knowledge networks, Beobachtung, naturalistic observation, Inhaltsanalyse, content analysis}},
  pages        = {{65--80}},
  publisher    = {{wbv}},
  title        = {{{Potenziale der Portfolioarbeit für den Aufbau vernetzten Wissens im Bachelorstudium: Eine Explorationsstudie im Studienfach Komparatistik/Vergleichende Literatur- und Kulturwissenschaft}}},
  doi          = {{10.3278/HSL2606W}},
  volume       = {{12}},
  year         = {{2026}},
}

@inproceedings{64823,
  abstract     = {{Current legal frameworks require that Android developers accurately report the data their apps collect. However, large codebases can make this reporting challenging. This paper employs an empirical approach to understand developers' experience with Google Play Store's Data Safety Section (DSS) form.

We first survey 41 Android developers to understand how they categorize privacy-related data into DSS categories and how confident they feel when completing the DSS form. To gain a broader and more detailed view of the challenges developers encounter during the process, we complement the survey with an analysis of 172 online developer discussions, capturing the perspectives of 642 additional developers. Together, these two data sources represent insights from 683 developers.

Our findings reveal that developers often manually classify the privacy-related data their apps collect into the data categories defined by Google (or, in some cases, omit classification entirely) and rely heavily on existing online resources when completing the form. Moreover, developers are generally confident in recognizing the data their apps collect, yet they lack confidence in translating this knowledge into DSS-compliant disclosures. Key challenges include issues in identifying privacy-relevant data to complete the form, limited understanding of the form, and concerns about app rejection due to discrepancies with Google's privacy requirements.
These results underscore the need for clearer guidance and more accessible tooling to support developers in meeting privacy-aware reporting obligations. }},
  author       = {{Khedkar, Mugdha and Schlichtig, Michael and Soliman, Mohamed Aboubakr Mohamed and Bodden, Eric}},
  booktitle    = {{Proceedings of the IEEE/ACM 13th International Conference on Mobile Software Engineering and Systems (MOBILESoft '26)}},
  publisher    = {{Association for Computing Machinery, New York, NY, USA}},
  pages        = {{65--68}},
  keywords     = {{static analysis, data collection, data protection, privacy-aware reporting}},
  location     = {{Rio de Janeiro, Brazil}},
  title        = {{{Challenges in Android Data Disclosure: An Empirical Study}}},
  year         = {{2026}},
}

@inproceedings{59483,
  abstract     = {{The assessment of mechanically joined connections, such as clinched connections, is usually conducted destructively. Applicable non-destructive testing methods like computed tomography are time-consuming and costly, or, like electrical resistance measurement, provide only a limited amount of information. A fast, non-destructive evaluation of the joints' condition is to be made possible by using transient dynamic analysis (TDA). TDA is based on the introduction of sound waves and the evaluation of the response behavior after they have passed through the structure. This study focuses on the application of TDA to clinched shear connections to evaluate the performance of the tactile measuring setup. Twenty-one series were investigated, covering variations in joining task, manufacturing, and defects. The evaluation was carried out using machine learning to determine for which series characteristic signals may be detected. It was shown that a classification of the investigated specimens is possible, whereby the classification accuracy depends on the examined variation. Furthermore, the accuracy was evaluated as a function of frequency, and the results were used to identify the limits of the measuring setup.}},
  author       = {{Reschke, Gregor and Brosius, Alexander}},
  booktitle    = {{Materials Research Proceedings}},
  issn         = {{2474-395X}},
  keywords     = {{Joining, Machine Learning, Transient Dynamic Analysis}},
  location     = {{Paderborn}},
  pages        = {{293--300}},
  publisher    = {{Materials Research Forum LLC}},
  title        = {{{Transient dynamic analysis: Performance evaluation of tactile measurement}}},
  doi          = {{10.21741/9781644903551-36}},
  volume       = {{52}},
  year         = {{2025}},
}

@article{59995,
  abstract     = {{Ultrasonic transmission measurements can be used for material characterization, as the propagation time of sound waves and thus their velocity depends on the elastic material parameters. Measurement results for the elastic material parameters are acquired non-destructively using ultrasonic transmission measurements of hollow cylindrical polymer specimens. To determine the material parameters, an inverse approach is used, comparing measurements with simulated data. Previous studies show that the procedure exhibits low sensitivity with respect to the shear parameters of the material. In order to increase the sensitivity, we propose to apply a spatially annular excitation on the base of the specimen. As a measure to analyse the sensitivities with respect to all parameters and their linear independence, we observe the volume of the parallelotope of the sensitivity vectors. Here, a scaled boundary finite element formulation of wave propagation in the specimen is expanded to yield derivative information directly, and a sensitivity analysis can be carried out efficiently. Finally, the results of this sensitivity analysis with regard to the annular excitation are also applied to the measurement setup.}},
  author       = {{Dreiling, Dmitrij and Itner, Dominik and Gravenkamp, Hauke and Claes, Leander and Birk, Carolin and Henning, Bernd}},
  issn         = {{0957-0233}},
  journal      = {{Measurement Science and Technology}},
  keywords     = {{Sensitivity analysis, Ultrasonic transducer, Guided waves, Polymers, Gram determinant}},
  publisher    = {{IOP Publishing}},
  title        = {{{Increasing the sensitivity of ultrasonic transmission measurements for elastic material parameter estimation}}},
  doi          = {{10.1088/1361-6501/add9b6}},
  volume       = {{36}},
  year         = {{2025}},
}

@inbook{58821,
  abstract     = {{Susan Stebbing wrote only once on Wittgenstein, in her paper ‘Logical Positivism and Analysis’ (1933). The paper was unusually critical of Wittgenstein. It put the Cambridge analytic philosophy of Moore and Russell in sharp opposition to the positivist philosophy of the Vienna Circle, in which Stebbing included Wittgenstein. Whereas the positivists were interested in analysing language, the Cambridge realists were analysing facts. To be more explicit, the analytic philosophers were engaged in directional analysis, which seeks to illuminate (to elucidate) the multiplicity of the analysed facts. In contrast, positivists aimed at a final analysis that proves that there are simples. Stebbing’s sympathies were clearly on the side of the Cambridge realists. The important implication of Stebbing’s paper was that it urged Wittgenstein to change the style of his philosophy, abandoning those points which allegedly connected him with the Vienna Circle.}},
  author       = {{Milkov, Nikolay}},
  booktitle    = {{Wittgenstein and Other Philosophers: His Influence on Historical and Contemporary Analytic Philosophers, vol. II}},
  editor       = {{Khani, Ali Hossein and Kemp, Gary}},
  keywords     = {{directional analysis, elucidation, facts, metaphysics, G. E. Moore, Russell, Stebbing, John Wisdom, Wittgenstein}},
  publisher    = {{Routledge}},
  title        = {{{Stebbing's Wittgenstein}}},
  year         = {{2025}},
}

@article{58885,
  abstract     = {{There have been several attempts to conceptualize and operationalize pedagogical content knowledge (PCK) in the context of teachers' professional competencies. A recent and popular model is the Refined Consensus Model (RCM), which proposes a framework of dispositional competencies (personal PCK—pPCK) that influence more action-related competencies (enacted PCK—ePCK) and vice versa. However, descriptions of the internal structure of pPCK and possible knowledge domains that might develop independently are still limited, being either primarily theoretically motivated or strictly hierarchical and therefore of limited use, for example, for formative feedback and further development of the RCM. Meanwhile, a non-hierarchical differentiation for the ePCK regarding the plan-teach-reflect cycle has emerged. In this study, we present an exploratory computational approach to investigate pre-service teachers' pPCK for a similar non-hierarchical structure using a large dataset of responses to a pPCK questionnaire (N=846). We drew on theoretical foundations and previous empirical findings to achieve interpretability by integrating this external knowledge into our analyses using the Computational Grounded Theory (CGT) framework. The results of a cluster analysis of the pPCK scores indicate the emergence of prototypical groups, which we refer to as competency profiles: (1) a group with low performance, (2) a group with relatively advanced competency in using pPCK to create instructional elements, (3) a group with relatively advanced competency in using pPCK to assess and analyze described instructional elements, and (4) a group with high performance. These groups show tendencies for certain language usage, which we analyze using a structural topic model in a CGT-inspired pattern refinement step. We verify these patterns by demonstrating the ability of a machine learning model to predict the competency profile assignments. Finally, we discuss some implications of the results for the further development of the RCM and their potential usability for an automated formative assessment.}},
  author       = {{Zeller, Jannis and Riese, Josef}},
  issn         = {{1098-2736}},
  journal      = {{Journal of Research in Science Teaching}},
  keywords     = {{computational grounded theory, language analysis, machine learning, pedagogical content knowledge, unsupervised learning}},
  title        = {{{Competency Profiles of PCK Using Unsupervised Learning: What Implications for the Structures of pPCK Emerge From Non-Hierarchical Analyses?}}},
  doi          = {{10.1002/tea.70001}},
  year         = {{2025}},
}

@inproceedings{60892,
  abstract     = {{At Paderborn University, an AR-based app is being developed to prepare electrical engineering students for laboratory work. This paper aims to review the development of AR since 2010, particularly in technical university laboratories, through a systematic literature review. The study investigates AR's relevance in university teaching and examines specific AR applications in laboratory settings.
Using a mixed-method approach, the research first employs a web crawler to gather 27,249 articles from the Lens database, followed by a bibliometric analysis. Further, Google Scholar is used to find 374 articles related to AR in scientific and technical laboratories, with 51 significant ones evaluated for application areas, findings, and other criteria.
The findings show that AR in education is a growing trend, with a significant increase in publications and citations in recent years. Most studies focus on marker-based mobile AR applications, assessing aspects like motivation and user experience through surveys and interviews. However, there is limited research on AR's learning effectiveness in laboratories, partly due to the scarcity of technical equipment. One study found no significant learning impact from AR.}},
  author       = {{Alptekin, Mesut and Froese, Lennart and Temmen, Katrin}},
  booktitle    = {{Recent Trends of AI Technologies and Virtual Reality: Proceedings of 8th International Conference on Artificial Intelligence and Virtual Reality (AIVR 2024)}},
  keywords     = {{Augmented Reality, Mixed Reality, Literature Review, Bibliometric Analysis, Education, Laboratories}},
  location     = {{Fukuoka, Japan}},
  pages        = {{427}},
  publisher    = {{Springer Nature}},
  title        = {{{Quantitative and Qualitative Literature Review of Augmented Reality in Teaching}}},
  volume       = {{432}},
  year         = {{2025}},
}

@article{61014,
  abstract     = {{To obtain a more comprehensive understanding of the specific properties of complex-shaped technical aerosols, such as partially sintered aggregates formed in combustion processes or structured particles resulting from complex synthesis processes, it is essential to measure more than a single equivalent size. This study examines a novel method for determining a two-dimensional distribution of two distinct particle properties within the size range from 50 nm to 1000 nm: the Centrifugal Differential Mobility Analyzer (CDMA). The CDMA enables the simultaneous measurement of both mobility and Stokes equivalent diameters, providing a detailed two-dimensional particle property distribution. This, in turn, allows for the extraction of shape-related information, which is essential for characterizing particles in terms of their chemical composition, reactivity, and other physicochemical properties. This paper presents a detailed evaluation of a first CDMA prototype. First, CFD simulations of the flow field within the classifier are presented in order to assess and understand non-idealities arising from the exact geometry. Subsequently, the transfer function is evaluated by particle trajectory calculations based on the simulated flow field. It can be demonstrated that the simulated transfer functions agree quite well with transfer functions derived from streamlines of an ideal flow field, indicating that the non-idealities in the classifying region have an almost negligible effect on the classification result. An experimental determination of the transfer function shows additional effects not covered by the previous simulations, such as broadening by diffusion and losses due to diffusion and precipitation within the inlet and outlet of the classifier. Finally, the determined transfer functions are used to obtain the full two-dimensional distribution with regard to the mobility and Stokes equivalent diameters of real aerosols, such as spherical particles and aggregates at different sintering stages.}},
  author       = {{Rüther, Torben Norbert and Gröne, Sebastian and Dechert, Christopher and Schmid, Hans-Joachim}},
  issn         = {{2674-0516}},
  journal      = {{Powders}},
  keywords     = {{centrifugal differential mobility analysis, 2D-measurement, particle characterization, moving reference frame CFD-simulation, transfer function}},
  number       = {{2}},
  publisher    = {{MDPI AG}},
  title        = {{{Centrifugal Differential Mobility Analysis—Validation and First Two-Dimensional Measurements}}},
  doi          = {{10.3390/powders4020011}},
  volume       = {{4}},
  year         = {{2025}},
}

@article{57472,
  abstract     = {{In this paper we introduce, in a Hilbert space setting, a second order dynamical system with asymptotically vanishing damping and vanishing Tikhonov regularization that approaches a multiobjective optimization problem with convex and differentiable components of the objective function. Trajectory solutions are shown to exist in finite dimensions. We prove fast convergence of the function values, quantified in terms of a merit function. Based on the regime considered, we establish both weak and, in some cases, strong convergence of trajectory solutions toward a weak Pareto optimal solution. To achieve this, we apply Tikhonov regularization individually to each component of the objective function. This work extends results from single objective convex optimization into the multiobjective setting.}},
  author       = {{Bot, Radu Ioan and Sonntag, Konstantin}},
  journal      = {{Journal of Mathematical Analysis and Applications}},
  keywords     = {{Pareto optimization, Lyapunov analysis, gradient-like dynamical systems, inertial dynamics, asymptotic vanishing damping, Tikhonov regularization, strong convergence}},
  title        = {{{Inertial dynamics with vanishing Tikhonov regularization for multiobjective optimization}}},
  year         = {{2025}},
}

@article{62111,
  abstract     = {{The execution of incompatible actions imposes costs on action planning, commonly known as response-response incompatibility costs. This phenomenon is also evident in sports: a basketball player who performs a pass in one direction while orienting the head in the opposite direction (pass with head fake) needs more time to initiate the action than if pass direction and head orientation are the same (pass without head fake).

The aim of this study was twofold. First, we present a re-analysis of the data from Böer et al. (Psychological Research 88:523–524, 2024) using mixture effect modelling (Miller, Behavior Research Methods 38:92–106, 2006) to explore whether fake-production costs manifest continuously (uniform effect) in all participants or whether some participants show fake-production costs occasionally but substantially (mixed effect). Second, we collected data from a control group, which were analysed together with the previous data from the practice group, and fitted initiation times (ITs) to an ex-Gaussian distribution.

The analysis of mixture effects revealed that most participants exhibited a uniform effect when they did not have time to mentally prepare the movement. This pattern was not changed by practice, suggesting that fake-production costs cannot be overcome by practice alone without mental preparation time.

The analysis of mean ITs revealed improvements in the practice group but not in the control group, independent of the type of pass performed. The distribution analyses complemented these findings by showing that the improvement in participants’ performance with increasing practice can mainly be attributed to a reduction of the exponential part of the distribution (parameter tau).}},
  author       = {{Böer, Nils Tobias and Schütz, Christoph and Weigelt, Matthias and Güldenpenning, Iris}},
  issn         = {{0340-0727}},
  journal      = {{Psychological Research}},
  keywords     = {{Sport Psychology, Sport Science, Deception, Distribution Analysis}},
  number       = {{2}},
  publisher    = {{Springer Science and Business Media LLC}},
  title        = {{{How does practice modulate fake-production costs in a basketball task? Analyses of frequency distributions and mixture effects}}},
  doi          = {{10.1007/s00426-025-02084-6}},
  volume       = {{89}},
  year         = {{2025}},
}

@misc{52663,
  abstract     = {{Context
Static analyses are well established as aids for understanding bugs or vulnerabilities during the development process or in large-scale studies. A low false-positive rate is essential for adoption in practice and for precise results of empirical studies. Unfortunately, static analyses tend to report where a vulnerability manifests rather than the fix location. This can cause presumed false positives or imprecise results.
Method
To address this problem, we designed an adaptation of an existing static analysis algorithm that can distinguish between a manifestation and a fix location, and reports error chains. An error chain represents at least two interconnected errors that occur successively, thus building the connection between the fix and the manifestation location. We used our tool CogniCryptSUBS for a case study on 471 GitHub repositories and a performance benchmark comparing different analysis configurations, and we conducted an expert interview.
Result
We found that 50 % of the projects with a report had at least one error chain. Our runtime benchmark demonstrated that our improvement caused only a minimal runtime overhead of less than 4 %. The results of our expert interview indicate that with our adapted version participants require fewer executions of the analysis.
Conclusion
Our results indicate that error chains occur frequently in real-world projects, and ignoring them can lead to imprecise evaluation results. The runtime benchmark indicates that our tool is a feasible and efficient solution for detecting error chains in real-world projects. Further, our results gave a hint that the usability of static analyses may benefit from supporting error chains.}},
  author       = {{Wickert, Anna-Katharina and Schlichtig, Michael and Vogel, Marvin and Winter, Lukas and Mezini, Mira and Bodden, Eric}},
  keywords     = {{Static analysis, error chains, false positive reduction, empirical studies}},
  title        = {{{Supporting Error Chains in Static Analysis for Precise Evaluation Results and Enhanced Usability}}},
  year         = {{2024}},
}

@article{34114,
  abstract     = {{Qualitative comparative analysis (QCA) enables researchers in international management to better understand how the impact of a single explanatory factor depends on the context of other factors. But the analytical toolbox of QCA does not include a parameter for the explanatory power of a single explanatory factor or “condition”. In this paper, we therefore reinterpret the Banzhaf power index, originally developed in cooperative game theory, to establish a goodness-of-fit parameter in QCA. The relative Banzhaf index we suggest measures the explanatory power of one condition averaged across all sufficient combinations of conditions. The paper argues that the index is especially informative in three situations that are all salient in international management and call for a context-sensitive analysis of single conditions, namely substantial limited diversity in the data, the emergence of strong INUS conditions in the analysis, and theorizing with contingency factors. The paper derives the properties of the relative Banzhaf index in QCA, demonstrates how the index can be computed easily from a rudimentary truth table, and explores its insights by revisiting selected papers in international management that apply fuzzy-set QCA. It finally suggests a three-step procedure for utilizing the relative Banzhaf index when the causal structure involves both contingency effects and configurational causation.}},
  author       = {{Haake, Claus-Jochen and Schneider, Martin}},
  journal      = {{Journal of International Management}},
  keywords     = {{Qualitative comparative analysis, Banzhaf power index, causality, explanatory power}},
  number       = {{2}},
  publisher    = {{Elsevier}},
  title        = {{{Playing games with QCA: Measuring the explanatory power of single conditions with the Banzhaf index}}},
  volume       = {{30}},
  year         = {{2024}},
}

@article{54548,
  author       = {{Prager, Raphael Patrick and Trautmann, Heike}},
  journal      = {{IEEE Transactions on Evolutionary Computation}},
  keywords     = {{Optimization, Evolutionary computation, Benchmark testing, Hyperparameter optimization, Portfolios, Extraterrestrial measurements, Dispersion, Exploratory landscape analysis, mixed-variable problem, mixed search spaces, automated algorithm selection}},
  pages        = {{1--1}},
  title        = {{{Exploratory Landscape Analysis for Mixed-Variable Problems}}},
  doi          = {{10.1109/TEVC.2024.3399560}},
  year         = {{2024}},
}

@article{32447,
  abstract     = {{We present a new gradient-like dynamical system related to unconstrained convex smooth multiobjective optimization which involves inertial effects and asymptotic vanishing damping. To the best of our knowledge, this system is the first inertial gradient-like system for multiobjective optimization problems including asymptotic vanishing damping, expanding the ideas previously laid out in [H. Attouch and G. Garrigos, Multiobjective Optimization: An Inertial Dynamical Approach to Pareto Optima, preprint, arXiv:1506.02823, 2015]. We prove existence of solutions to this system in finite dimensions and further prove that its bounded solutions converge weakly to weakly Pareto optimal points. In addition, we obtain a convergence rate of order \(\mathcal{O}(t^{-2})\) for the function values measured with a merit function. This approach presents a good basis for the development of fast gradient methods for multiobjective optimization.}},
  author       = {{Sonntag, Konstantin and Peitz, Sebastian}},
  issn         = {{1095-7189}},
  journal      = {{SIAM Journal on Optimization}},
  keywords     = {{multiobjective optimization, Pareto optimization, Lyapunov analysis, gradient-like dynamical systems, inertial dynamics, asymptotic vanishing damping, fast convergence}},
  number       = {{3}},
  pages        = {{2259--2286}},
  publisher    = {{Society for Industrial and Applied Mathematics}},
  title        = {{{Fast Convergence of Inertial Multiobjective Gradient-Like Systems with Asymptotic Vanishing Damping}}},
  doi          = {{10.1137/23M1588512}},
  volume       = {{34}},
  year         = {{2024}},
}

@article{53300,
  author       = {{Brennecken, Dominik}},
  issn         = {{0022-247X}},
  journal      = {{Journal of Mathematical Analysis and Applications}},
  keywords     = {{Applied Mathematics, Analysis}},
  number       = {{2}},
  publisher    = {{Elsevier BV}},
  title        = {{{Hankel transform, K-Bessel functions and zeta distributions in the Dunkl setting}}},
  doi          = {{10.1016/j.jmaa.2024.128125}},
  volume       = {{535}},
  year         = {{2024}},
}

@article{58348,
  abstract     = {{Clinching is a mechanical joining technology in which a mainly form-fit joint is created by means of local cold forming. To characterize the load-bearing behavior of such joints, they are typically analyzed destructively, for example by tensile-shear tests in combination with metallographic sections. However, both the initiation and progress of failure can only be described to a limited extent by this method. Furthermore, these tests allow only limited conclusions about clinch points under in-service loading. More purposefully, clinch points can be analyzed nondestructively by combining in-situ computed tomography (CT) and transient dynamic analysis (TDA). The TDA continuously measures the dynamic behavior of the specimen and indicates failure events like crack initiation, which can then be evaluated thoroughly by stopping the test and performing a CT scan. To qualify the TDA for this task, it is necessary to link the observed damage behavior with specific dynamic characteristics. In this work, the complementation of in-situ CT and TDA is investigated by testing a clinched single-lap tensile-shear specimen made of aluminum. The testing procedure is stepwise: at certain displacement levels, the specimen is investigated by in-situ CT and TDA. While the in-situ CT provides the location, extent, and development of the failure phenomena, the TDA uses this information to evaluate the dynamic signal and detect relevant frequency ranges which indicate damage events. The results demonstrate that failure initiation and progression can be analyzed efficiently by combining both measuring systems. The TDA reliably detects relevant signal changes in the monitored frequency band. By means of in-situ computed tomography, the corresponding failure phenomena can be described in detail, enhancing the understanding of the load-bearing and deformation behavior of clinch points. The linking of characteristic signal changes and observed failure phenomena can henceforth be transferred to the nondestructive analysis of complex structures during operation by means of TDA.}},
  author       = {{Reschke, Gregor and Köhler, Daniel and Kupfer, Robert and Troschitz, Juliane and Gude, Maik and Brosius, Alexander}},
  issn         = {{0954-4089}},
  journal      = {{Proceedings of the Institution of Mechanical Engineers, Part E: Journal of Process Mechanical Engineering}},
  keywords     = {{Clinching, Non-destructive testing, Transient Dynamic Analysis}},
  publisher    = {{SAGE Publications}},
  title        = {{{In-situ computed tomography and transient dynamic analysis – failure analysis of a single-lap tensile-shear test with clinch points}}},
  doi          = {{10.1177/09544089241251646}},
  year         = {{2024}},
}

@article{51374,
  author       = {{Hasler, David and Hinrichs, Benjamin and Siebert, Oliver}},
  issn         = {{0022-1236}},
  journal      = {{Journal of Functional Analysis}},
  keywords     = {{Analysis}},
  number       = {{7}},
  publisher    = {{Elsevier BV}},
  title        = {{{Non-Fock ground states in the translation-invariant Nelson model revisited non-perturbatively}}},
  doi          = {{10.1016/j.jfa.2024.110319}},
  volume       = {{286}},
  year         = {{2024}},
}

@inproceedings{52235,
  abstract     = {{Android applications collecting data from users must protect it according to the current legal frameworks. Such data protection has become even more important since the European Union rolled out the General Data Protection Regulation (GDPR). Since app developers are not legal experts, they find it difficult to write privacy-aware source code. Moreover, they have limited tool support to reason about data protection throughout their app development process.
This paper motivates the need for a static analysis approach to diagnose and explain data protection in Android apps. The analysis will recognize personal data sources in the source code, and aims to further examine the data flow originating from these sources. App developers can then address key questions about data manipulation, derived data, and the presence of technical measures. Despite challenges, we explore to what extent one can realize this analysis through static taint analysis, a common method for identifying security vulnerabilities. This is a first step towards designing a tool-based approach that aids app developers and assessors in ensuring data protection in Android apps, based on automated static program analysis. }},
  author       = {{Khedkar, Mugdha and Bodden, Eric}},
  booktitle    = {{Proceedings of the IEEE/ACM 11th International Conference on Mobile Software Engineering and Systems (MOBILESoft '24)}},
  publisher    = {{Association for Computing Machinery, New York, NY, USA}},
  pages        = {{65--68}},
  keywords     = {{static program analysis, data protection and privacy, GDPR compliance}},
  location     = {{Lisbon, Portugal}},
  title        = {{{Toward an Android Static Analysis Approach for Data Protection}}},
  doi          = {{10.1145/3647632.3651389}},
  year         = {{2024}},
}

@inproceedings{56918,
  abstract     = {{Joint value creation of organizations in ecosystems has a high failure rate, stressing the need for tools that enable the alignment of business models through visual inquiry. However, existing visual inquiry tools rarely consider recent design knowledge or ecosystem understanding. This leads to dissatisfied users and impedes the full realization of ecosystems’ potential. This short paper proposes an archaeological design science approach for enhancing the design of visual inquiry tools (e.g., a canvas) for ecosystems. Preliminary findings reveal 24 relevant artifacts as well as shortcomings in the creation of conceptual models and rigorous evaluations. The proposed research process aims to develop design principles for more effective tools to bridge the gap between visual inquiry tools and ecosystems. This research contributes to design science research by reutilizing design knowledge and further developing the archaeological design approach. It also offers valuable information to practitioners about existing business model tools for the creation of ecosystems.}},
  author       = {{Vorbohle, Christian}},
  booktitle    = {{Proceedings of the Thirty-Second European Conference on Information Systems (ECIS 2024)}},
  keywords     = {{Design Science Research, Design Archaeology, Canvas Analysis, Business Model Tools}},
  location     = {{Paphos, Cyprus}},
  title        = {{{Bridging Boundaries: Enhancing Visual Inquiry Tools for Ecosystems through Design Archaeology}}},
  year         = {{2024}},
}

@inproceedings{61403,
  author       = {{Lohmer, Vivien and Kern, Friederike}},
  booktitle    = {{Second International Multimodal Communication Symposium (MMSYM) - Book of Abstracts}},
  keywords     = {{gesture, explanations, conversation analysis}},
  location     = {{Goethe-Universität Frankfurt, Germany}},
  title        = {{{The role of interactive gestures in explanatory interactions}}},
  year         = {{2024}},
}

