@article{51374, author = {{Hasler, David and Hinrichs, Benjamin and Siebert, Oliver}}, issn = {{0022-1236}}, journal = {{Journal of Functional Analysis}}, keywords = {{Analysis}}, number = {{7}}, publisher = {{Elsevier BV}}, title = {{{Non-Fock ground states in the translation-invariant Nelson model revisited non-perturbatively}}}, doi = {{10.1016/j.jfa.2024.110319}}, volume = {{286}}, year = {{2024}}, } @inproceedings{52235, abstract = {{Android applications collecting data from users must protect it according to the current legal frameworks. Such data protection has become even more important since the European Union rolled out the General Data Protection Regulation (GDPR). Since app developers are not legal experts, they find it difficult to write privacy-aware source code. Moreover, they have limited tool support to reason about data protection throughout their app development process. This paper motivates the need for a static analysis approach to diagnose and explain data protection in Android apps. The analysis will recognize personal data sources in the source code, and aims to further examine the data flow originating from these sources. App developers can then address key questions about data manipulation, derived data, and the presence of technical measures. Despite challenges, we explore to what extent one can realize this analysis through static taint analysis, a common method for identifying security vulnerabilities. This is a first step towards designing a tool-based approach that aids app developers and assessors in ensuring data protection in Android apps, based on automated static program analysis.}}, author = {{Khedkar, Mugdha and Bodden, Eric}}, booktitle = {{Proceedings of the 9th International Conference on Mobile Software Engineering and Systems}}, keywords = {{static program analysis, data protection and privacy, GDPR compliance}}, location = {{Lisbon, Portugal}}, title = {{{Toward an Android Static Analysis Approach for Data Protection}}}, year = {{2024}}, } @misc{52663, abstract = {{Context Static analyses are well-established to aid in understanding bugs or vulnerabilities during the development process or in large-scale studies. A low false-positive rate is essential for adoption in practice and for precise results of empirical studies. Unfortunately, static analyses tend to report where a vulnerability manifests rather than the fix location. This can cause presumed false positives or imprecise results. Method To address this problem, we designed an adaptation of an existing static analysis algorithm that can distinguish between a manifestation and fix location, and reports error chains. An error chain represents at least two interconnected errors that occur successively, thus building the connection between the fix and manifestation location. We used our tool CogniCryptSUBS for a case study on 471 GitHub repositories, a performance benchmark to compare different analysis configurations, and conducted an expert interview. Result We found that 50 % of the projects with a report had at least one error chain. Our runtime benchmark demonstrated that our improvement caused only a minimal runtime overhead of less than 4 %. The results of our expert interview indicate that with our adapted version participants require fewer executions of the analysis. Conclusion Our results indicate that error chains occur frequently in real-world projects, and ignoring them can lead to imprecise evaluation results.
The runtime benchmark indicates that our tool is a feasible and efficient solution for detecting error chains in real-world projects. Further, our results suggest that the usability of static analyses may benefit from supporting error chains.}}, author = {{Wickert, Anna-Katharina and Schlichtig, Michael and Vogel, Marvin and Winter, Lukas and Mezini, Mira and Bodden, Eric}}, keywords = {{Static analysis, error chains, false positive reduction, empirical studies}}, title = {{{Supporting Error Chains in Static Analysis for Precise Evaluation Results and Enhanced Usability}}}, year = {{2024}}, } @inproceedings{36522, abstract = {{Jupyter notebooks enable developers to interleave code snippets with rich-text and in-line visualizations. Data scientists use Jupyter notebooks as the de facto standard for creating and sharing machine-learning based solutions, primarily written in Python. Recent studies have demonstrated, however, that a large portion of Jupyter notebooks available on public platforms are undocumented and lack a narrative structure. This reduces the readability of these notebooks. To address this shortcoming, this paper presents HeaderGen, a novel tool-based approach that automatically annotates code cells with categorical markdown headers based on a taxonomy of machine-learning operations, and classifies and displays function calls according to this taxonomy. For this functionality to be realized, HeaderGen enhances an existing call graph analysis in PyCG. To improve precision, HeaderGen extends PyCG's analysis with support for handling external library code and flow-sensitivity. The former is realized by facilitating the resolution of function return-types. Furthermore, HeaderGen uses type information to perform pattern matching on code syntax to annotate code cells. The evaluation on 15 real-world Jupyter notebooks from Kaggle shows that HeaderGen's underlying call graph analysis yields high accuracy (96.4% precision and 95.9% recall). This is because HeaderGen can resolve return-types of external libraries where existing type inference tools such as pytype (by Google), pyright (by Microsoft), and Jedi fall short. The header generation has a precision of 82.2% and a recall rate of 96.8% with regard to headers created manually by experts. In a user study, HeaderGen helps participants finish comprehension and navigation tasks faster.
All participants clearly perceive HeaderGen as useful to their task.}}, author = {{Shivarpatna Venkatesh, Ashwin Prasad and Wang, Jiawei and Li, Li and Bodden, Eric}}, keywords = {{static analysis, python, code comprehension, annotation, literate programming, jupyter notebook}}, booktitle = {{IEEE SANER 2023 (International Conference on Software Analysis, Evolution and Reengineering)}}, title = {{{Enhancing Comprehension and Navigation in Jupyter Notebooks with Static Analysis}}}, doi = {{10.48550/ARXIV.2301.04419}}, year = {{2023}}, } @article{43105, author = {{Black, Tobias and Fuest, Mario and Lankeit, Johannes and Mizukami, Masaaki}}, issn = {{1468-1218}}, journal = {{Nonlinear Analysis: Real World Applications}}, keywords = {{Applied Mathematics, Computational Mathematics, General Economics, Econometrics and Finance, General Engineering, General Medicine, Analysis}}, publisher = {{Elsevier BV}}, title = {{{Possible points of blow-up in chemotaxis systems with spatially heterogeneous logistic source}}}, doi = {{10.1016/j.nonrwa.2023.103868}}, volume = {{73}}, year = {{2023}}, } @inproceedings{44390, abstract = {{The development of autonomous vehicles and their introduction in urban traffic offer many opportunities for traffic improvements. In this paper, an approach for a future traffic control system for mixed autonomy traffic environments is presented. Furthermore, a simulation framework based on the city of Paderborn is introduced to enable the development and examination of such a system. This encompasses multiple elements including the road network itself, traffic lights, sensors as well as methods to analyse the topology of the network. Furthermore, a procedure for traffic demand generation and routing is presented based on statistical data of the city and traffic data obtained by measurements. The resulting model can receive and apply the generated control inputs and in turn generates simulated sensor data for the control system based on the current system state.}}, author = {{Link, Christopher and Malena, Kevin and Gausemeier, Sandra and Trächtler, Ansgar}}, booktitle = {{Proceedings of the 9th International Conference on Vehicle Technology and Intelligent Transport Systems}}, isbn = {{978-989-758-652-1}}, keywords = {{Traffic Simulation, Traffic Control, Car2X, Mixed Autonomy, Autonomous Vehicles, SUMO, Sensor Simulation, Traffic Demand Generation, Routing, Traffic Lights, Graph Analysis, Traffic Observer}}, location = {{Prague, Czech Republic}}, publisher = {{SCITEPRESS - Science and Technology Publications}}, title = {{{Simulation Environment for Traffic Control Systems Targeting Mixed Autonomy Traffic Scenarios}}}, doi = {{10.5220/0011987600003479}}, year = {{2023}}, } @article{46100, author = {{Hinrichs, Benjamin and Janssen, Daan W. and Ziebell, Jobst}}, issn = {{0022-247X}}, journal = {{Journal of Mathematical Analysis and Applications}}, keywords = {{Applied Mathematics, Analysis}}, number = {{1}}, publisher = {{Elsevier BV}}, title = {{{Super-Gaussian decay of exponentials: A sufficient condition}}}, doi = {{10.1016/j.jmaa.2023.127558}}, volume = {{528}}, year = {{2023}}, } @article{29240, abstract = {{The principle of least action is one of the most fundamental physical principles. It says that among all possible motions connecting two points in a phase space, the system will exhibit those motions which extremise an action functional.
Many qualitative features of dynamical systems, such as the presence of conservation laws and energy balance equations, are related to the existence of an action functional. Incorporating variational structure into learning algorithms for dynamical systems is, therefore, crucial in order to make sure that the learned model shares important features with the exact physical system. In this paper we show how to incorporate variational principles into trajectory predictions of learned dynamical systems. The novelty of this work is that (1) our technique relies only on discrete position data of observed trajectories. Velocities or conjugate momenta do not need to be observed or approximated and no prior knowledge about the form of the variational principle is assumed. Instead, they are recovered using backward error analysis. (2) Moreover, our technique compensates for discretisation errors when trajectories are computed from the learned system. This is important when moderate to large step-sizes are used and high accuracy is required. For this, we introduce and rigorously analyse the concept of inverse modified Lagrangians by developing an inverse version of variational backward error analysis. (3) Finally, we introduce a method to perform system identification from position observations only, based on variational backward error analysis.}}, author = {{Ober-Blöbaum, Sina and Offen, Christian}}, issn = {{0377-0427}}, journal = {{Journal of Computational and Applied Mathematics}}, keywords = {{Lagrangian learning, variational backward error analysis, modified Lagrangian, variational integrators, physics informed learning}}, pages = {{114780}}, publisher = {{Elsevier}}, title = {{{Variational Learning of Euler–Lagrange Dynamics from Data}}}, doi = {{10.1016/j.cam.2022.114780}}, volume = {{421}}, year = {{2023}}, } @article{29236, abstract = {{The numerical solution of an ordinary differential equation can be interpreted as the exact solution of a nearby modified equation. Investigating the behaviour of numerical solutions by analysing the modified equation is known as backward error analysis. If the original and modified equation share structural properties, then the exact and approximate solution share geometric features such as the existence of conserved quantities. Conjugate symplectic methods preserve a modified symplectic form and a modified Hamiltonian when applied to a Hamiltonian system. We show how a blended version of variational and symplectic techniques can be used to compute modified symplectic and Hamiltonian structures. In contrast to other approaches, our backward error analysis method does not rely on an ansatz but computes the structures systematically, provided that a variational formulation of the method is known. The technique is illustrated on the example of symmetric linear multistep methods with matrix coefficients.}}, author = {{McLachlan, Robert and Offen, Christian}}, journal = {{Journal of Geometric Mechanics}}, keywords = {{variational integrators, backward error analysis, Euler--Lagrange equations, multistep methods, conjugate symplectic methods}}, number = {{1}}, pages = {{98--115}}, publisher = {{AIMS Press}}, title = {{{Backward error analysis for conjugate symplectic methods}}}, doi = {{10.3934/jgm.2023005}}, volume = {{15}}, year = {{2023}}, } @article{34114, abstract = {{Qualitative comparative analysis (QCA) enables researchers in international management to better understand how the impact of a single explanatory factor depends on the context of other factors.
But the analytical toolbox of QCA does not include a parameter for the explanatory power of a single explanatory factor or “condition”. In this paper, we therefore reinterpret the Banzhaf power index, originally developed in cooperative game theory, to establish a goodness-of-fit parameter in QCA. The relative Banzhaf index we suggest measures the explanatory power of one condition averaged across all sufficient combinations of conditions. The paper argues that the index is especially informative in three situations that are all salient in international management and call for a context-sensitive analysis of single conditions, namely substantial limited diversity in the data, the emergence of strong INUS conditions in the analysis, and theorizing with contingency factors. The paper derives the properties of the relative Banzhaf index in QCA, demonstrates how the index can be computed easily from a rudimentary truth table, and explores its insights by revisiting selected papers in international management that apply fuzzy-set QCA. It finally suggests a three-step procedure for utilizing the relative Banzhaf index when the causal structure involves both contingency effects and configurational causation. }}, author = {{Haake, Claus-Jochen and Schneider, Martin}}, journal = {{Journal of International Management}}, keywords = {{Qualitative comparative analysis, Banzhaf power index, causality, explanatory power}}, publisher = {{Elsevier}}, title = {{{Playing games with QCA: Measuring the explanatory power of single conditions with the Banzhaf index}}}, year = {{2023}}, } @article{47065, abstract = {{The reform of the European academic landscape with the introduction of bachelor's and master's degree programs has brought about several profound changes for teaching and assessment in higher education. With regard to the examination system, the shift towards output-oriented teaching is still one of the most significant challenges. Assessments have to be integrated into the teaching and learning arrangements and consistently aligned towards the intended learning outcomes. In particular, assessments should provide valid evidence that learners have acquired competences that are relevant for a specific domain. However, it seems that this didactic goal has not yet been fully achieved in modeling education in computer science. The aim of this study is to investigate whether typical task material used in exercises and exams in modeling education at selected German universities covers relevant competences required for graphical modeling. For this purpose, typical tasks in the field of modeling are first identified by means of a content-analytical procedure. Subsequently, it is determined which competence facets relevant for graphical modeling are addressed by the task types. By contrasting a competence model for modeling with the competences addressed by the tasks, a gap was identified between the required competences and the task material analyzed. In particular, the gap analysis shows the neglect of transversal competence facets as well as those related to the analysis and evaluation of models. The result of this paper is a classification of task types for modeling education and a specification of the competence facets addressed by these tasks. 
Recommendations for developing and assessing students' competences comprehensively are given.}}, author = {{Soyka, Chantal and Ullrich, Meike and Striewe, Michael and Schaper, Niclas}}, journal = {{Enterprise Modelling and Information Systems Architectures}}, keywords = {{conceptual modeling, higher education, competence-oriented assessment, task analysis, graphical modeling}}, title = {{{Comparison of Required Competences and Task Material in Modeling Education}}}, doi = {{10.18417/EMISA.18.7}}, volume = {{18}}, year = {{2023}}, } @article{47155, abstract = {{»One does not talk about money.« A look at the past reveals this rule of etiquette to be an illusion. By no means was money fundamentally a taboo subject in interpersonal communication. Quite the contrary: talking about money charges it with meaning. The authors of this special issue examine discourses about money and the attributions of meaning to money in international history.}}, author = {{Schönhärl, Korinna and Schotters, Frederike and Thiemeyer, Guido}}, journal = {{Werkstatt Geschichte}}, keywords = {{Financial History, Discourse Analysis, History of Money}}, title = {{{Editorial}}}, volume = {{88}}, year = {{2023}}, } @misc{47154, abstract = {{»One does not talk about money.« A look at the past reveals this rule of etiquette to be an illusion. By no means was money fundamentally a taboo subject in interpersonal communication. Quite the contrary: talking about money charges it with meaning. The authors of this special issue examine discourses about money and the attributions of meaning to money in international history.}}, booktitle = {{Werkstatt Geschichte}}, editor = {{Schönhärl, Korinna and Schotters, Frederike and Thiemeyer, Guido}}, keywords = {{Financial History, Discourse Analysis, History of Money}}, title = {{{Reden über Geld}}}, volume = {{88}}, year = {{2023}}, } @inproceedings{47522, abstract = {{Artificial benchmark functions are commonly used in optimization research because of their ability to rapidly evaluate potential solutions, making them a preferred substitute for real-world problems. However, these benchmark functions have faced criticism for their limited resemblance to real-world problems. In response, recent research has focused on automatically generating new benchmark functions for areas where established test suites are inadequate. These approaches have limitations, such as the difficulty of generating new benchmark functions that exhibit exploratory landscape analysis (ELA) features beyond those of existing benchmarks. The objective of this work is to develop a method for generating benchmark functions for single-objective continuous optimization with user-specified structural properties. Specifically, we aim to demonstrate a proof of concept for a method that uses an ELA feature vector to specify these properties in advance. To achieve this, we begin by generating a random sample of decision space variables and objective values. We then adjust the objective values using CMA-ES until the corresponding features of our new problem match the predefined ELA features within a specified threshold. By iteratively transforming the landscape in this way, we ensure that the resulting function exhibits the desired properties. To create the final function, we use the resulting point cloud as training data for a simple neural network that produces a function exhibiting the target ELA features.
We demonstrate the effectiveness of this approach by replicating the existing functions of the well-known BBOB suite and creating new functions with ELA feature values that are not present in BBOB.}}, author = {{Prager, Raphael Patrick and Dietrich, Konstantin and Schneider, Lennart and Schäpermeier, Lennart and Bischl, Bernd and Kerschke, Pascal and Trautmann, Heike and Mersmann, Olaf}}, booktitle = {{Proceedings of the 17th ACM/SIGEVO Conference on Foundations of Genetic Algorithms}}, isbn = {{9798400702020}}, keywords = {{Benchmarking, Instance Generator, Black-Box Continuous Optimization, Exploratory Landscape Analysis, Neural Networks}}, pages = {{129–139}}, publisher = {{Association for Computing Machinery}}, title = {{{Neural Networks as Black-Box Benchmark Functions Optimized for Exploratory Landscape Features}}}, doi = {{10.1145/3594805.3607136}}, year = {{2023}}, } @inproceedings{44326, abstract = {{Low-quality models that miss relevant dynamics lead to major challenges in model-based state estimation. We address this issue by simultaneously estimating the system’s states and its model inaccuracies by a square root unscented Kalman filter (SRUKF). Concretely, we augment the state with the parameter vector of a linear combination containing suitable functions that approximate the lacking dynamics. Presuming that only a few dynamical terms are relevant, the parameter vector is claimed to be sparse. In a Bayesian setting, properties like sparsity are expressed by a prior distribution. One common choice for sparsity is a Laplace distribution. However, due to disadvantages of a Laplacian prior in regards to the SRUKF, the regularized horseshoe distribution, a Gaussian that approximately features sparsity, is applied instead. Results exhibit small estimation errors with model improvements detected by an automated model reduction technique.}}, author = {{Götte, Ricarda-Samantha and Timmermann, Julia}}, booktitle = {{IFAC-PapersOnLine}}, keywords = {{joint estimation, unscented Kalman filter, sparsity, Laplacian prior, regularized horseshoe, principal component analysis}}, location = {{Yokohama, Japan}}, number = {{2}}, pages = {{869--874}}, title = {{{Approximating a Laplacian Prior for Joint State and Model Estimation within an UKF}}}, volume = {{56}}, year = {{2023}}, } @inproceedings{48872, abstract = {{Quality diversity (QD) is a branch of evolutionary computation that gained increasing interest in recent years. The Map-Elites QD approach defines a feature space, i.e., a partition of the search space, and stores the best solution for each cell of this space. We study a simple QD algorithm in the context of pseudo-Boolean optimisation on the "number of ones" feature space, where the ith cell stores the best solution amongst those with a number of ones in [(i - 1)k, ik - 1]. Here k is a granularity parameter 1 {$\leq$} k {$\leq$} n+1. We give a tight bound on the expected time until all cells are covered for arbitrary fitness functions and for all k and analyse the expected optimisation time of QD on OneMax and other problems whose structure aligns favourably with the feature space. On combinatorial problems we show that QD finds a (1 - 1/e)-approximation when maximising any monotone sub-modular function with a single uniform cardinality constraint efficiently.
Defining the feature space as the number of connected components of a connected graph, we show that QD finds a minimum spanning tree in expected polynomial time.}}, author = {{Bossek, Jakob and Sudholt, Dirk}}, booktitle = {{Proceedings of the Genetic and Evolutionary Computation Conference}}, isbn = {{9798400701191}}, keywords = {{quality diversity, runtime analysis}}, pages = {{1546–1554}}, publisher = {{Association for Computing Machinery}}, title = {{{Runtime Analysis of Quality Diversity Algorithms}}}, doi = {{10.1145/3583131.3590383}}, year = {{2023}}, } @inproceedings{48886, abstract = {{Generating new instances via evolutionary methods is commonly used to create new benchmarking data-sets, with a focus on attempting to cover an instance-space as completely as possible. Recent approaches have exploited Quality-Diversity methods to evolve sets of instances that are both diverse and discriminatory with respect to a portfolio of solvers, but these methods can be challenging when attempting to find diversity in a high-dimensional feature-space. We address this issue by training a model based on Principal Component Analysis on existing instances to create a low-dimension projection of the high-dimension feature-vectors, and then apply Novelty Search directly in the new low-dimension space. We conduct experiments to evolve diverse and discriminatory instances of Knapsack Problems, comparing the use of Novelty Search in the original feature-space to using Novelty Search in a low-dimensional projection, and repeat over a given set of dimensions. We find that the methods are complementary: if treated as an ensemble, they collectively provide increased coverage of the space. Specifically, searching for novelty in a low-dimension space contributes 56% of the filled regions of the space, while searching directly in the feature-space covers the remaining 44%.}}, author = {{Marrero, Alejandro and Segredo, Eduardo and Hart, Emma and Bossek, Jakob and Neumann, Aneta}}, booktitle = {{Proceedings of the Genetic and Evolutionary Computation Conference}}, isbn = {{9798400701191}}, keywords = {{evolutionary computation, instance generation, instance-space analysis, knapsack problem, novelty search}}, pages = {{312–320}}, publisher = {{Association for Computing Machinery}}, title = {{{Generating Diverse and Discriminatory Knapsack Instances by Searching for Novelty in Variable Dimensions of Feature-Space}}}, doi = {{10.1145/3583131.3590504}}, year = {{2023}}, } @article{48871, abstract = {{Most runtime analyses of randomised search heuristics focus on the expected number of function evaluations to find a unique global optimum. We ask a fundamental question: if additional search points are declared optimal, or declared as desirable target points, do these additional optima speed up evolutionary algorithms? More formally, we analyse the expected hitting time of a target set OPT{$\cup$}S where S is a set of non-optimal search points and OPT is the set of optima and compare it to the expected hitting time of OPT. We show that the answer to our question depends on the number and placement of search points in S. For all black-box algorithms and all fitness functions with polynomial expected optimisation times we show that, if additional optima are placed randomly, even an exponential number of optima has a negligible effect on the expected optimisation time.
Considering Hamming balls around all global optima gives an easier target for some algorithms and functions and can shift the phase transition with respect to offspring population sizes in the (1,{$\lambda$}) EA on OneMax. However, for the one-dimensional Ising model the time to reach Hamming balls of radius (1/2-{$\epsilon$})n around optima does not reduce the asymptotic expected optimisation time in the worst case. Finally, on functions where search trajectories typically join in a single search point, turning one search point into an optimum drastically reduces the expected optimisation time.}}, author = {{Bossek, Jakob and Sudholt, Dirk}}, issn = {{0304-3975}}, journal = {{Theoretical Computer Science}}, keywords = {{Evolutionary algorithms, pseudo-Boolean functions, runtime analysis}}, pages = {{113757}}, title = {{{Do Additional Target Points Speed Up Evolutionary Algorithms?}}}, doi = {{10.1016/j.tcs.2023.113757}}, year = {{2023}}, } @inproceedings{37058, abstract = {{Digital technologies have made the line of visibility more transparent, enabling customers to get deeper insights into an organization’s core operations than ever before. This creates new challenges for organizations trying to consistently deliver high-quality customer experiences. In this paper we conduct an empirical analysis of customers’ preferences and their willingness-to-pay for different degrees of process transparency, using the example of digitally-enabled business-to-customer delivery services. Applying conjoint analysis, we quantify customers’ preferences and willingness-to-pay for different service attributes and levels. Our contributions are two-fold: For research, we provide empirical measurements of customers’ preferences and their willingness-to-pay for process transparency, suggesting that more is not always better. Additionally, we provide a blueprint of how conjoint analysis can be applied to study design decisions regarding changing an organization’s digital line of visibility. For practice, our findings enable service managers to make decisions about process transparency and establishing different levels of service quality.}}, author = {{Brennig, Katharina and Müller, Oliver}}, booktitle = {{Hawaii International Conference on System Sciences}}, keywords = {{Digital Services, Line of Visibility, Process Transparency, Customer Preferences, Conjoint Analysis}}, location = {{Lāhainā}}, title = {{{More Isn’t Always Better – Measuring Customers’ Preferences for Digital Process Transparency}}}, year = {{2023}}, } @inproceedings{44146, abstract = {{Many Android applications collect data from users. When they do, they must protect this collected data according to the current legal frameworks. Such data protection has become even more important since the European Union rolled out the General Data Protection Regulation (GDPR). App developers have limited tool support to reason about data protection throughout their app development process. Although many Android applications state a privacy policy, privacy policy compliance checks are currently manual, expensive, and prone to error. One of the major challenges in privacy audits is the significant gap between legal privacy statements (in English text) and technical measures that Android apps use to protect their users' privacy. In this thesis, we will explore to what extent we can use static analysis to answer important questions regarding data protection.
Our main goal is to design a tool-based approach that aids app developers and auditors in ensuring data protection in Android applications, based on automated static program analysis.}}, author = {{Khedkar, Mugdha}}, booktitle = {{Proceedings of the 45th International Conference on Software Engineering: Companion Proceedings (ICSE ’23)}}, keywords = {{static analysis, data protection and privacy, GDPR compliance}}, title = {{{Static Analysis for Android GDPR Compliance Assurance}}}, doi = {{10.1109/ICSE-Companion58688.2023.00054}}, year = {{2023}}, }