@inproceedings{35426, author = {{Richter, Cedric and Haltermann, Jan Frederik and Jakobs, Marie-Christine and Pauck, Felix and Schott, Stefan and Wehrheim, Heike}}, booktitle = {{37th IEEE/ACM International Conference on Automated Software Engineering}}, publisher = {{ACM}}, title = {{{Are Neural Bug Detectors Comparable to Software Developers on Variable Misuse Bugs?}}}, doi = {{10.1145/3551349.3561156}}, year = {{2023}}, }

@inproceedings{36848, author = {{Schott, Stefan and Pauck, Felix}}, booktitle = {{2022 IEEE 22nd International Working Conference on Source Code Analysis and Manipulation (SCAM)}}, publisher = {{IEEE}}, title = {{{Benchmark Fuzzing for Android Taint Analyses}}}, doi = {{10.1109/scam55253.2022.00007}}, year = {{2022}}, }

@inproceedings{35427, author = {{Pauck, Felix}}, booktitle = {{37th IEEE/ACM International Conference on Automated Software Engineering}}, publisher = {{ACM}}, title = {{{Scaling Arbitrary Android App Analyses}}}, doi = {{10.1145/3551349.3561339}}, year = {{2023}}, }

@phdthesis{43108, author = {{Pauck, Felix}}, publisher = {{Paderborn University}}, title = {{{Cooperative Android App Analysis}}}, doi = {{10.17619/UNIPB/1-1698}}, year = {{2023}}, }

@inbook{45888, author = {{Wehrheim, Heike and Platzner, Marco and Bodden, Eric and Schubert, Philipp and Pauck, Felix and Jakobs, Marie-Christine}}, booktitle = {{On-The-Fly Computing -- Individualized IT-services in dynamic markets}}, editor = {{Haake, Claus-Jochen and Meyer auf der Heide, Friedhelm and Platzner, Marco and Wachsmuth, Henning and Wehrheim, Heike}}, pages = {{125--144}}, publisher = {{Heinz Nixdorf Institut, Universität Paderborn}}, title = {{{Verifying Software and Reconfigurable Hardware Services}}}, doi = {{10.5281/zenodo.8068583}}, volume = {{412}}, year = {{2023}}, }

@article{27045, abstract = {{Due to the lack of established real-world benchmark suites for static taint analyses of Android applications, evaluations of these analyses are often restricted and hard to compare. Even in evaluations that do use real-world apps, details about the ground truth in those apps are rarely documented, which makes it difficult to compare and reproduce the results. To push Android taint analysis research forward, this paper thus recommends criteria for constructing real-world benchmark suites for this specific domain, and presents TaintBench, the first real-world malware benchmark suite with documented taint flows. TaintBench benchmark apps include taint flows with complex structures and address static challenges that are commonly agreed on by the community. Together with the TaintBench suite, we introduce the TaintBench framework, whose goal is to simplify real-world benchmarking of Android taint analyses. First, a usability test shows that the framework improves experts’ performance and perceived usability when documenting and inspecting taint flows. Second, experiments using TaintBench reveal new insights for the taint analysis tools Amandroid and FlowDroid: (i) They are less effective on real-world malware apps than on synthetic benchmark apps. (ii) Predefined lists of sources and sinks heavily impact the tools’ accuracy.
(iii) Surprisingly, up-to-date versions of both tools are less accurate than their predecessors.}}, author = {{Luo, Linghui and Pauck, Felix and Piskachev, Goran and Benz, Manuel and Pashchenko, Ivan and Mory, Martin and Bodden, Eric and Hermann, Ben and Massacci, Fabio}}, issn = {{1382-3256}}, journal = {{Empirical Software Engineering}}, title = {{{TaintBench: Automatic real-world malware benchmarking of Android taint analyses}}}, doi = {{10.1007/s10664-021-10013-5}}, year = {{2021}}, }

@inproceedings{28199, author = {{Pauck, Felix and Wehrheim, Heike}}, booktitle = {{2021 IEEE 21st International Working Conference on Source Code Analysis and Manipulation (SCAM)}}, title = {{{Jicer: Simplifying Cooperative Android App Analysis Tasks}}}, doi = {{10.1109/scam52516.2021.00031}}, year = {{2021}}, }

@article{27841, abstract = {{Verification of software and processor hardware usually proceeds separately, software analysis relying on the correctness of processors executing machine instructions. This assumption is valid as long as the software runs on standard CPUs that have been extensively validated and are in wide use. However, for processors exploiting custom instruction set extensions to meet performance and energy constraints, the validation might be less extensive, challenging the correctness assumption. In this paper, we present a novel formal approach for hardware/software co-verification targeting processors with custom instruction set extensions. We detail two different approaches for checking whether the hardware fulfills the requirements expected by the software analysis. The approaches are designed to explore a trade-off between generality of the verification and computational effort. Then, we describe the integration of software and hardware analyses for both techniques and present a fully automated tool chain implementing the approaches. Finally, we demonstrate and compare the two approaches on example source code with custom instructions, using state-of-the-art software analysis and hardware verification techniques.}}, author = {{Jakobs, Marie-Christine and Pauck, Felix and Platzner, Marco and Wehrheim, Heike and Wiersema, Tobias}}, journal = {{IEEE Access}}, keywords = {{Software Analysis, Abstract Interpretation, Custom Instruction, Hardware Verification}}, publisher = {{IEEE}}, title = {{{Software/Hardware Co-Verification for Custom Instruction Set Processors}}}, doi = {{10.1109/ACCESS.2021.3131213}}, year = {{2021}}, }

@inproceedings{21238, author = {{Pauck, Felix and Wehrheim, Heike}}, booktitle = {{Software Engineering 2021}}, editor = {{Koziolek, Anne and Schaefer, Ina and Seidl, Christoph}}, pages = {{83--84}}, publisher = {{Gesellschaft für Informatik e.V.}}, title = {{{Cooperative Android App Analysis with CoDiDroid}}}, doi = {{10.18420/SE2021_30}}, year = {{2021}}, }

@inproceedings{16214, author = {{Pauck, Felix and Bodden, Eric and Wehrheim, Heike}}, booktitle = {{Software Engineering 2020, Fachtagung des GI-Fachbereichs Softwaretechnik, 24.-28. Februar 2020, Innsbruck, Austria}}, editor = {{Felderer, Michael and Hasselbring, Wilhelm and Rabiser, Rick and Jung, Reiner}}, pages = {{123--124}}, publisher = {{Gesellschaft für Informatik e.V.}}, title = {{{Reproducing Taint-Analysis Results with ReproDroid}}}, doi = {{10.18420/SE2020_36}}, year = {{2020}}, }

@inproceedings{15838, abstract = {{In the field of software analysis, a trade-off between scalability and accuracy always exists.
In this respect, Android app analysis is no exception; in particular, analyzing large or many apps can be challenging. Dealing with many small apps is a typical challenge when facing micro-benchmarks such as DROIDBENCH or ICC-BENCH. These particular benchmarks are not only used for the evaluation of novel tools but also in continuous integration pipelines of existing mature tools to maintain and guarantee a certain quality level. Considering this latter usage, it becomes very important to be able to achieve benchmark results as fast as possible. Hence, benchmarks have to be optimized for this purpose. One approach to do so is app merging. We implemented the Android Merge Tool (AMT) following this approach and show that its novel aspects can be used to produce scaled-up and accurate benchmarks. For such benchmarks, Android app analysis tools no longer suffer from the scalability-accuracy trade-off. We show this through detailed experiments on DROIDBENCH employing three different analysis tools (AMANDROID, ICCTA, FLOWDROID). Benchmark execution times are largely reduced without losing benchmark accuracy. Moreover, we argue why AMT is an advantageous successor to the state-of-the-art app merging tool (APKCOMBINER) in analysis lift-up scenarios.}}, author = {{Pauck, Felix and Zhang, Shikun}}, booktitle = {{2019 34th IEEE/ACM International Conference on Automated Software Engineering Workshop (ASEW)}}, isbn = {{9781728141367}}, keywords = {{Program Analysis, Android App Analysis, Taint Analysis, App Merging, Benchmark}}, title = {{{Android App Merging for Benchmark Speed-Up and Analysis Lift-Up}}}, doi = {{10.1109/asew.2019.00019}}, year = {{2019}}, }

@inproceedings{10108, abstract = {{Recent years have seen the development of numerous tools for the analysis of taint flows in Android apps. Taint analyses aim at detecting data leaks, accidentally or purposely programmed into apps. Often, such tools specialize in the treatment of specific features impeding precise taint analysis (like reflection or inter-app communication). This multitude of tools, their specific applicability and their various combination options complicate the selection of a tool (or multiple tools) when faced with an analysis instance, even for knowledgeable users, and hence hinder the successful adoption of taint analyses. In this work, we thus present CoDiDroid, a framework for cooperative Android app analysis. CoDiDroid (1) allows users to ask questions about flows in apps in varying degrees of detail, (2) automatically generates subtasks for answering such questions, (3) distributes tasks onto analysis tools (currently DroidRA, FlowDroid, HornDroid, IC3 and two novel tools) and (4) at the end merges tool answers on subtasks into an overall answer. Thereby, users are freed from having to learn about the use and functionality of all these tools while still being able to leverage their capabilities.
Moreover, we experimentally show that cooperation among tools pays off with respect to effectiveness, precision and scalability.}}, author = {{Pauck, Felix and Wehrheim, Heike}}, booktitle = {{Proceedings of the 2019 27th ACM Joint Meeting on European Software Engineering Conference and Symposium on the Foundations of Software Engineering}}, isbn = {{978-1-4503-5572-8}}, keywords = {{Android Taint Analysis, Cooperation, Precision, Tools}}, pages = {{374--384}}, title = {{{Together Strong: Cooperative Android App Analysis}}}, doi = {{10.1145/3338906.3338915}}, year = {{2019}}, }

@inproceedings{13874, author = {{Isenberg, Tobias and Jakobs, Marie-Christine and Pauck, Felix and Wehrheim, Heike}}, booktitle = {{Tests and Proofs - 13th International Conference, {TAP} 2019, Held as Part of the Third World Congress on Formal Methods 2019, Porto, Portugal, October 9-11, 2019, Proceedings}}, pages = {{3--20}}, title = {{{When Are Software Verification Results Valid for Approximate Hardware?}}}, doi = {{10.1007/978-3-030-31157-5_1}}, year = {{2019}}, }

@unpublished{2711, abstract = {{In recent years, researchers have developed a number of tools to conduct taint analysis of Android applications. While all the respective papers aim at providing a thorough empirical evaluation, comparability is hindered by varying or unclear evaluation targets. Sometimes, the apps used for evaluation are not precisely described. In other cases, authors use an established benchmark but cover it only partially. In yet other cases, the evaluations differ in terms of the data leaks searched for, or lack a ground truth to compare against. All those limitations make it impossible to truly compare the tools based on those published evaluations. We thus present ReproDroid, a framework allowing the accurate comparison of Android taint analysis tools. ReproDroid supports researchers in inferring the ground truth for data leaks in apps, in automatically applying tools to benchmarks, and in evaluating the obtained results. We use ReproDroid to comparatively evaluate on equal grounds the six prominent taint analysis tools Amandroid, DIALDroid, DidFail, DroidSafe, FlowDroid and IccTA. The results are largely positive, although four tools violate some promises concerning features and accuracy. Finally, we contribute to the area of unbiased benchmarking with a new and improved version of the open test suite DroidBench.}}, author = {{Pauck, Felix and Bodden, Eric and Wehrheim, Heike}}, note = {{arXiv:1804.02903}}, title = {{{Do Android Taint Analysis Tools Keep their Promises?}}}, year = {{2018}}, }

@inproceedings{4999, author = {{Pauck, Felix and Bodden, Eric and Wehrheim, Heike}}, booktitle = {{Proceedings of the 2018 26th ACM Joint Meeting on European Software Engineering Conference and Symposium on the Foundations of Software Engineering - ESEC/FSE 2018}}, isbn = {{9781450355735}}, publisher = {{ACM Press}}, title = {{{Do Android taint analysis tools keep their promises?}}}, doi = {{10.1145/3236024.3236029}}, year = {{2018}}, }

@article{1043, abstract = {{Approximate computing (AC) is an emerging paradigm for energy-efficient computation. The basic idea of AC is to sacrifice high precision for low energy by allowing hardware to carry out “approximately correct” calculations. This provides a major challenge for software quality assurance: programs successfully verified to be correct might be erroneous on approximate hardware.
In this letter, we present a novel approach for determining under what conditions a software verification result is valid for approximate hardware. To this end, we compute the allowed tolerances for AC hardware from successful verification runs. More precisely, we derive a set of constraints which, when met by the AC hardware, guarantee the verification result to carry over to AC. On the practical side, we furthermore: 1) show how to extract tolerances from verification runs employing predicate abstraction as verification technology and 2) show how to check such constraints on hardware designs. We have implemented all techniques and exemplify them on example C programs and a number of recently proposed approximate adders.}}, author = {{Isenberg, Tobias and Jakobs, Marie-Christine and Pauck, Felix and Wehrheim, Heike}}, issn = {{1943-0663}}, journal = {{IEEE Embedded Systems Letters}}, pages = {{22--25}}, publisher = {{Institute of Electrical and Electronics Engineers (IEEE)}}, title = {{{Validity of Software Verification Results on Approximate Hardware}}}, doi = {{10.1109/LES.2017.2758200}}, year = {{2018}}, }

@misc{109, author = {{Pauck, Felix}}, publisher = {{Universität Paderborn}}, title = {{{Cooperative static analysis of Android applications}}}, year = {{2017}}, }

@inproceedings{170, abstract = {{We present PAndA2, an extendable static analysis tool for Android apps which examines permission-related security threats like overprivilege, existence of permission redelegation and permission flows. PAndA2 comes along with a textual and graphical visualization of the analysis result and even supports the comparison of analysis results for different Android app versions.}}, author = {{Jakobs, Marie-Christine and Töws, Manuel and Pauck, Felix}}, booktitle = {{Workshop on Formal and Model-Driven Techniques for Developing Trustworthy Systems}}, editor = {{Ishikawa, F. and Romanovsky, A. and Troubitsyna, E.}}, title = {{{PAndA2: Analyzing Permission Use and Interplay in Android Apps (Tool Paper)}}}, year = {{2016}}, }

@misc{418, author = {{Pauck, Felix}}, publisher = {{Universität Paderborn}}, title = {{{Generierung von Eigenschaftsprüfern in einem Hardware/Software-Co-Verifikationsverfahren}}}, year = {{2014}}, }