@comment{Cleaned bibliography database.
  NOTE(review): the following key pairs are duplicate entries for the same work and
  should be merged once it is known which keys are cited: 20278/21019, 16215/13993,
  16217/10011, 7752/10094, 12885/10105.
  NOTE(review): entry 47833 is missing the required `school` field for @phdthesis —
  confirm the institution before adding it.
  Entry 36848: year corrected 2023 -> 2022 to match the SCAM 2022 proceedings (see DOI).}

@inproceedings{35426,
  author    = {Richter, Cedric and Haltermann, Jan Frederik and Jakobs, Marie-Christine and Pauck, Felix and Schott, Stefan and Wehrheim, Heike},
  booktitle = {37th IEEE/ACM International Conference on Automated Software Engineering},
  publisher = {ACM},
  title     = {Are Neural Bug Detectors Comparable to Software Developers on Variable Misuse Bugs?},
  doi       = {10.1145/3551349.3561156},
  year      = {2023},
}

@inproceedings{36848,
  author    = {Schott, Stefan and Pauck, Felix},
  booktitle = {2022 IEEE 22nd International Working Conference on Source Code Analysis and Manipulation (SCAM)},
  publisher = {IEEE},
  title     = {Benchmark Fuzzing for {Android} Taint Analyses},
  doi       = {10.1109/scam55253.2022.00007},
  year      = {2022},
}

@inproceedings{35427,
  author    = {Pauck, Felix},
  booktitle = {37th IEEE/ACM International Conference on Automated Software Engineering},
  publisher = {ACM},
  title     = {Scaling Arbitrary {Android} App Analyses},
  doi       = {10.1145/3551349.3561339},
  year      = {2023},
}

@phdthesis{43108,
  author = {Pauck, Felix},
  school = {Paderborn University},
  title  = {Cooperative {Android} App Analysis},
  doi    = {10.17619/UNIPB/1-1698},
  year   = {2023},
}

@phdthesis{47833,
  author = {König, Jürgen},
  title  = {On the Membership and Correctness Problem for State Serializability and Value Opacity},
  year   = {2023},
}

@inproceedings{32590,
  author    = {Richter, Cedric and Wehrheim, Heike},
  booktitle = {2022 IEEE Conference on Software Testing, Verification and Validation (ICST)},
  pages     = {162--173},
  title     = {Learning Realistic Mutations: Bug Creation for Neural Bug Detectors},
  doi       = {10.1109/ICST53961.2022.00027},
  year      = {2022},
}

@inproceedings{32591,
  author    = {Richter, Cedric and Wehrheim, Heike},
  booktitle = {2022 IEEE/ACM 19th International Conference on Mining Software Repositories (MSR)},
  pages     = {418--422},
  title     = {{TSSB-3M}: Mining single statement bugs at massive scale},
  doi       = {10.1145/3524842.3528505},
  year      = {2022},
}

@inproceedings{45248,
  author    = {Dongol, Brijesh and Schellhorn, Gerhard and Wehrheim, Heike},
  booktitle = {33rd International Conference on Concurrency Theory, CONCUR 2022, September 12-16, 2022, Warsaw, Poland},
  editor    = {Klin, Bartek and Lasota, Slawomir and Muscholl, Anca},
  pages     = {31:1--31:23},
  publisher = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik},
  title     = {Weak Progressive Forward Simulation Is Necessary and Sufficient for Strong Observational Refinement},
  doi       = {10.4230/LIPIcs.CONCUR.2022.31},
  volume    = {243},
  year      = {2022},
}

@inproceedings{28350,
  abstract  = {In recent years, we observe an increasing amount of software with machine learning components being deployed. This poses the question of quality assurance for such components: how can we validate whether specified requirements are fulfilled by a machine learned software? Current testing and verification approaches either focus on a single requirement (e.g., fairness) or specialize on a single type of machine learning model (e.g., neural networks). In this paper, we propose property-driven testing of machine learning models. Our approach MLCheck encompasses (1) a language for property specification, and (2) a technique for systematic test case generation. The specification language is comparable to property-based testing languages. Test case generation employs advanced verification technology for a systematic, property dependent construction of test suites, without additional user supplied generator functions. We evaluate MLCheck using requirements and data sets from three different application areas (software discrimination, learning on knowledge graphs and security). Our evaluation shows that despite its generality MLCheck can even outperform specialised testing approaches while having a comparable runtime},
  author    = {Sharma, Arnab and Demir, Caglar and Ngonga Ngomo, Axel-Cyrille and Wehrheim, Heike},
  booktitle = {Proceedings of the 20th IEEE International Conference on Machine Learning and Applications (ICMLA)},
  publisher = {IEEE},
  title     = {{MLCHECK}–Property-Driven Testing of Machine Learning Classifiers},
  year      = {2021},
}

@article{27045,
  abstract = {Due to the lack of established real-world benchmark suites for static taint analyses of Android applications, evaluations of these analyses are often restricted and hard to compare. Even in evaluations that do use real-world apps, details about the ground truth in those apps are rarely documented, which makes it difficult to compare and reproduce the results. To push Android taint analysis research forward, this paper thus recommends criteria for constructing real-world benchmark suites for this specific domain, and presents TaintBench, the first real-world malware benchmark suite with documented taint flows. TaintBench benchmark apps include taint flows with complex structures, and addresses static challenges that are commonly agreed on by the community. Together with the TaintBench suite, we introduce the TaintBench framework, whose goal is to simplify real-world benchmarking of Android taint analyses. First, a usability test shows that the framework improves experts’ performance and perceived usability when documenting and inspecting taint flows. Second, experiments using TaintBench reveal new insights for the taint analysis tools Amandroid and FlowDroid: (i) They are less effective on real-world malware apps than on synthetic benchmark apps. (ii) Predefined lists of sources and sinks heavily impact the tools’ accuracy. (iii) Surprisingly, up-to-date versions of both tools are less accurate than their predecessors.},
  author   = {Luo, Linghui and Pauck, Felix and Piskachev, Goran and Benz, Manuel and Pashchenko, Ivan and Mory, Martin and Bodden, Eric and Hermann, Ben and Massacci, Fabio},
  issn     = {1382-3256},
  journal  = {Empirical Software Engineering},
  title    = {{TaintBench}: Automatic real-world malware benchmarking of {Android} taint analyses},
  doi      = {10.1007/s10664-021-10013-5},
  year     = {2021},
}

@misc{22304,
  author = {Schott, Stefan},
  title  = {{Android} App Analysis Benchmark Case Generation},
  year   = {2021},
}

@inproceedings{28199,
  author    = {Pauck, Felix and Wehrheim, Heike},
  booktitle = {2021 IEEE 21st International Working Conference on Source Code Analysis and Manipulation (SCAM)},
  title     = {{Jicer}: Simplifying Cooperative {Android} App Analysis Tasks},
  doi       = {10.1109/scam52516.2021.00031},
  year      = {2021},
}

@inproceedings{21238,
  author    = {Pauck, Felix and Wehrheim, Heike},
  booktitle = {Software Engineering 2021},
  editor    = {Koziolek, Anne and Schaefer, Ina and Seidl, Christoph},
  pages     = {83--84},
  publisher = {Gesellschaft für Informatik e.V.},
  title     = {Cooperative {Android} App Analysis with {CoDiDroid}},
  doi       = {10.18420/SE2021_30},
  year      = {2021},
}

@inproceedings{19656,
  author    = {Sharma, Arnab and Wehrheim, Heike},
  booktitle = {Proceedings of the 32nd IFIP International Conference on Testing Software and Systems (ICTSS)},
  publisher = {Springer},
  title     = {Automatic Fairness Testing of Machine Learning Models},
  year      = {2020},
}

@misc{19999,
  author    = {Mayer, Stefan},
  publisher = {Universität Paderborn},
  title     = {Optimierung von {JMCTest} beim Testen von Inter Method Contracts},
  year      = {2020},
}

@inproceedings{20274,
  author    = {Bila, Eleni and Doherty, Simon and Dongol, Brijesh and Derrick, John and Schellhorn, Gerhard and Wehrheim, Heike},
  booktitle = {Formal Techniques for Distributed Objects, Components, and Systems - 40th {IFIP} {WG} 6.1 International Conference, {FORTE} 2020, Held as Part of the 15th International Federated Conference on Distributed Computing Techniques, DisCoTec 2020, Valletta, Malta, June 15-19, 2020, Proceedings},
  editor    = {Gotsman, Alexey and Sokolova, Ana},
  pages     = {39--58},
  publisher = {Springer},
  title     = {Defining and Verifying Durable Opacity: Correctness for Persistent Software Transactional Memory},
  doi       = {10.1007/978-3-030-50086-3_3},
  volume    = {12136},
  year      = {2020},
}

@inproceedings{20275,
  author    = {Beringer, Steffen and Wehrheim, Heike},
  booktitle = {Proceedings of the 15th International Conference on Software Technologies, {ICSOFT} 2020, Lieusaint, Paris, France, July 7-9, 2020},
  editor    = {van Sinderen, Marten and Fill, Hans{-}Georg and Maciaszek, Leszek A.},
  pages     = {15--26},
  publisher = {ScitePress},
  title     = {Consistency Analysis of {AUTOSAR} Timing Requirements},
  doi       = {10.5220/0009766600150026},
  year      = {2020},
}

@inproceedings{20276,
  author    = {Beyer, Dirk and Wehrheim, Heike},
  booktitle = {Leveraging Applications of Formal Methods, Verification and Validation: Verification Principles - 9th International Symposium on Leveraging Applications of Formal Methods, ISoLA 2020, Rhodes, Greece, October 20-30, 2020, Proceedings, Part {I}},
  editor    = {Margaria, Tiziana and Steffen, Bernhard},
  pages     = {143--167},
  publisher = {Springer},
  title     = {Verification Artifacts in Cooperative Verification: Survey and Unifying Component Framework},
  doi       = {10.1007/978-3-030-61362-4_8},
  volume    = {12476},
  year      = {2020},
}

@proceedings{20277,
  editor    = {Wehrheim, Heike and Cabot, Jordi},
  isbn      = {978-3-030-45233-9},
  publisher = {Springer},
  title     = {Fundamental Approaches to Software Engineering - 23rd International Conference, FASE 2020, Held as Part of the European Joint Conferences on Theory and Practice of Software, ETAPS 2020, Dublin, Ireland, April 25-30, 2020, Proceedings},
  doi       = {10.1007/978-3-030-45234-6},
  volume    = {12076},
  year      = {2020},
}

@proceedings{20278,
  editor    = {Ahrendt, Wolfgang and Wehrheim, Heike},
  isbn      = {978-3-030-50994-1},
  publisher = {Springer},
  title     = {Tests and Proofs - 14th International Conference, TAP@STAF 2020, Bergen, Norway, June 22-23, 2020, Proceedings [postponed]},
  doi       = {10.1007/978-3-030-50995-8},
  volume    = {12165},
  year      = {2020},
}

@article{20279,
  author  = {Sharma, Arnab and Wehrheim, Heike},
  journal = {CoRR},
  title   = {Testing Monotonicity of Machine Learning Models},
  volume  = {abs/2002.12278},
  year    = {2020},
}

@article{21016,
  author  = {Dalvandi, Sadegh and Doherty, Simon and Dongol, Brijesh and Wehrheim, Heike},
  journal = {Dagstuhl Artifacts Ser.},
  number  = {2},
  pages   = {15:1--15:2},
  title   = {{Owicki-Gries} Reasoning for {C11} {RAR} (Artifact)},
  doi     = {10.4230/DARTS.6.2.15},
  volume  = {6},
  year    = {2020},
}

@inproceedings{21017,
  author    = {Dalvandi, Sadegh and Doherty, Simon and Dongol, Brijesh and Wehrheim, Heike},
  booktitle = {34th European Conference on Object-Oriented Programming, {ECOOP} 2020, November 15-17, 2020, Berlin, Germany (Virtual Conference)},
  editor    = {Hirschfeld, Robert and Pape, Tobias},
  pages     = {11:1--11:26},
  publisher = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik},
  title     = {{Owicki-Gries} Reasoning for {C11} {RAR}},
  doi       = {10.4230/LIPIcs.ECOOP.2020.11},
  volume    = {166},
  year      = {2020},
}

@inproceedings{21018,
  author    = {Richter, Cedric and Wehrheim, Heike},
  booktitle = {35th {IEEE/ACM} International Conference on Automated Software Engineering, {ASE} 2020, Melbourne, Australia, September 21-25, 2020},
  pages     = {1016--1028},
  publisher = {IEEE},
  title     = {Attend and Represent: A Novel View on Algorithm Selection for Software Verification},
  year      = {2020},
}

@proceedings{21019,
  editor    = {Ahrendt, Wolfgang and Wehrheim, Heike},
  isbn      = {978-3-030-50994-1},
  publisher = {Springer},
  title     = {Tests and Proofs - 14th International Conference, TAP@STAF 2020, Bergen, Norway, June 22-23, 2020, Proceedings [postponed]},
  doi       = {10.1007/978-3-030-50995-8},
  volume    = {12165},
  year      = {2020},
}

@unpublished{17825,
  abstract = {Software verification has recently made enormous progress due to the development of novel verification methods and the speed-up of supporting technologies like SMT solving. To keep software verification tools up to date with these advances, tool developers keep on integrating newly designed methods into their tools, almost exclusively by re-implementing the method within their own framework. While this allows for a conceptual re-use of methods, it requires novel implementations for every new technique. In this paper, we employ cooperative verification in order to avoid reimplementation and enable usage of novel tools as black-box components in verification. Specifically, cooperation is employed for the core ingredient of software verification which is invariant generation. Finding an adequate loop invariant is key to the success of a verification run. Our framework named CoVerCIG allows a master verification tool to delegate the task of invariant generation to one or several specialized helper invariant generators. Their results are then utilized within the verification run of the master verifier, allowing in particular for crosschecking the validity of the invariant. We experimentally evaluate our framework on an instance with two masters and three different invariant generators using a number of benchmarks from SV-COMP 2020. The experiments show that the use of CoVerCIG can increase the number of correctly verified tasks without increasing the used resources},
  author   = {Haltermann, Jan Frederik and Wehrheim, Heike},
  note     = {arXiv:2008.04551},
  title    = {Cooperative Verification via Collective Invariant Generation},
  year     = {2020},
}

@inproceedings{16724,
  author    = {Sharma, Arnab and Wehrheim, Heike},
  booktitle = {Proceedings of the ACM SIGSOFT International Symposium on Software Testing and Analysis (ISSTA)},
  publisher = {ACM},
  title     = {Higher Income, Larger Loan? Monotonicity Testing of Machine Learning Models},
  year      = {2020},
}

@article{16725,
  author    = {Richter, Cedric and Hüllermeier, Eyke and Jakobs, Marie-Christine and Wehrheim, Heike},
  journal   = {Journal of Automated Software Engineering},
  publisher = {Springer},
  title     = {Algorithm Selection for Software Validation Based on Graph Kernels},
  year      = {2020},
}

@article{13770,
  author    = {Karl, Holger and Kundisch, Dennis and {Meyer auf der Heide}, Friedhelm and Wehrheim, Heike},
  journal   = {Business \& Information Systems Engineering},
  number    = {6},
  pages     = {467--481},
  publisher = {Springer},
  title     = {A Case for a New {IT} Ecosystem: {On-The-Fly} Computing},
  doi       = {10.1007/s12599-019-00627-x},
  volume    = {62},
  year      = {2020},
}

@inproceedings{16214,
  author    = {Pauck, Felix and Bodden, Eric and Wehrheim, Heike},
  booktitle = {Software Engineering 2020, Fachtagung des GI-Fachbereichs Softwaretechnik, 24.-28. Februar 2020, Innsbruck, Austria},
  editor    = {Felderer, Michael and Hasselbring, Wilhelm and Rabiser, Rick and Jung, Reiner},
  pages     = {123--124},
  publisher = {Gesellschaft für Informatik e.V.},
  title     = {Reproducing Taint-Analysis Results with {ReproDroid}},
  doi       = {10.18420/SE2020_36},
  year      = {2020},
}

@inproceedings{3287,
  abstract  = {For optimal placement and orchestration of network services, it is crucial that their structure and semantics are specified clearly and comprehensively and are available to an orchestrator. Existing specification approaches are either ambiguous or miss important aspects regarding the behavior of virtual network functions (VNFs) forming a service. We propose to formally and unambiguously specify the behavior of these functions and services using Queuing Petri Nets (QPNs). QPNs are an established method that allows to express queuing, synchronization, stochastically distributed processing delays, and changing traffic volume and characteristics at each VNF. With QPNs, multiple VNFs can be connected to complete network services in any structure, even specifying bidirectional network services containing loops. We discuss how management and orchestration systems can benefit from our clear and comprehensive specification approach, leading to better placement of VNFs and improved Quality of Service. Another benefit of formally specifying network services with QPNs are diverse analysis options, which allow valuable insights such as the distribution of end-to-end delay. We propose a tool-based workflow that supports the specification of network services and the automatic generation of corresponding simulation code to enable an in-depth analysis of their behavior and performance.},
  author    = {Schneider, Stefan Balthasar and Sharma, Arnab and Karl, Holger and Wehrheim, Heike},
  booktitle = {2019 IFIP/IEEE International Symposium on Integrated Network Management (IM)},
  location  = {Washington, DC, USA},
  pages     = {116--124},
  publisher = {IFIP},
  title     = {Specifying and Analyzing Virtual Network Services Using Queuing {Petri} Nets},
  year      = {2019},
}

@inproceedings{7752,
  author    = {Sharma, Arnab and Wehrheim, Heike},
  booktitle = {Proceedings of the Software Engineering Conference (SE)},
  isbn      = {978-3-88579-686-2},
  location  = {Stuttgart},
  pages     = {157--158},
  publisher = {Gesellschaft für Informatik e.V. (GI)},
  title     = {Testing Balancedness of {ML} Algorithms},
  volume    = {P-292},
  year      = {2019},
}

@misc{7623,
  author    = {Zhang, Shikun},
  pages     = {64},
  publisher = {Universität Paderborn},
  title     = {Combining {Android} Apps for Analysis Purposes},
  year      = {2019},
}

@inproceedings{7635,
  author    = {Sharma, Arnab and Wehrheim, Heike},
  booktitle = {IEEE International Conference on Software Testing, Verification and Validation (ICST)},
  location  = {Xi'an, China, April, 2019},
  pages     = {125--135},
  publisher = {IEEE},
  title     = {Testing Machine Learning Algorithms for Balanced Data Usage},
  year      = {2019},
}

@misc{12885,
  author = {Haltermann, Jan Frederik},
  title  = {Analyzing Data Usage in Array Programs},
  year   = {2019},
}

@inproceedings{15838,
  abstract  = {In the field of software analysis a trade-off between scalability and accuracy always exists. In this respect, Android app analysis is no exception, in particular, analyzing large or many apps can be challenging. Dealing with many small apps is a typical challenge when facing micro-benchmarks such as DROIDBENCH or ICC-BENCH. These particular benchmarks are not only used for the evaluation of novel tools but also in continuous integration pipelines of existing mature tools to maintain and guarantee a certain quality-level. Considering this latter usage it becomes very important to be able to achieve benchmark results as fast as possible. Hence, benchmarks have to be optimized for this purpose. One approach to do so is app merging. We implemented the Android Merge Tool (AMT) following this approach and show that its novel aspects can be used to produce scaled up and accurate benchmarks. For such benchmarks Android app analysis tools do not suffer from the scalability-accuracy trade-off anymore. We show this throughout detailed experiments on DROIDBENCH employing three different analysis tools (AMANDROID, ICCTA, FLOWDROID). Benchmark execution times are largely reduced without losing benchmark accuracy. Moreover, we argue why AMT is an advantageous successor of the state-of-the-art app merging tool (APKCOMBINER) in analysis lift-up scenarios.},
  author    = {Pauck, Felix and Zhang, Shikun},
  booktitle = {2019 34th IEEE/ACM International Conference on Automated Software Engineering Workshop (ASEW)},
  isbn      = {9781728141367},
  keywords  = {Program Analysis, Android App Analysis, Taint Analysis, App Merging, Benchmark},
  title     = {{Android} App Merging for Benchmark Speed-Up and Analysis Lift-Up},
  doi       = {10.1109/asew.2019.00019},
  year      = {2019},
}

@inproceedings{16215,
  author    = {Derrick, John and Doherty, Simon and Dongol, Brijesh and Schellhorn, Gerhard and Wehrheim, Heike},
  booktitle = {Formal Methods - The Next 30 Years - Third World Congress, {FM} 2019, Porto, Portugal, October 7-11, 2019, Proceedings},
  editor    = {ter Beek, Maurice H. and McIver, Annabelle and Oliveira, José N.},
  pages     = {179--195},
  publisher = {Springer},
  title     = {Verifying Correctness of Persistent Concurrent Data Structures},
  doi       = {10.1007/978-3-030-30942-8_12},
  volume    = {11800},
  year      = {2019},
}

@article{16216,
  author  = {Russo, Alessandra and Schürr, Andy and Wehrheim, Heike},
  journal = {Formal Asp. Comput.},
  number  = {5},
  pages   = {457--458},
  title   = {Editorial},
  doi     = {10.1007/s00165-019-00495-y},
  volume  = {31},
  year    = {2019},
}

@article{16217,
  author  = {Fränzle, Martin and Kapur, Deepak and Wehrheim, Heike and Zhan, Naijun},
  journal = {Formal Asp. Comput.},
  number  = {1},
  pages   = {1},
  title   = {Editorial},
  doi     = {10.1007/s00165-018-00477-6},
  volume  = {31},
  year    = {2019},
}

@inbook{13872,
  author    = {Beyer, Dirk and Jakobs, Marie-Christine},
  booktitle = {Fundamental Approaches to Software Engineering},
  isbn      = {9783030167219},
  issn      = {0302-9743},
  title     = {{CoVeriTest}: Cooperative Verifier-Based Testing},
  doi       = {10.1007/978-3-030-16722-6_23},
  year      = {2019},
}

@inproceedings{13993,
  author    = {Derrick, John and Doherty, Simon and Dongol, Brijesh and Schellhorn, Gerhard and Wehrheim, Heike},
  booktitle = {Formal Methods - The Next 30 Years - Third World Congress, {FM} 2019, Porto, Portugal, October 7-11, 2019, Proceedings},
  pages     = {179--195},
  title     = {Verifying Correctness of Persistent Concurrent Data Structures},
  doi       = {10.1007/978-3-030-30942-8_12},
  year      = {2019},
}

@article{10011,
  author  = {Fränzle, Martin and Kapur, Deepak and Wehrheim, Heike and Zhan, Naijun},
  journal = {Formal Asp. Comput.},
  number  = {1},
  pages   = {1},
  title   = {Editorial},
  doi     = {10.1007/s00165-018-00477-6},
  volume  = {31},
  year    = {2019},
}

@inproceedings{10091,
  author    = {König, Jürgen and Wehrheim, Heike},
  booktitle = {{NASA} Formal Methods - 11th International Symposium, {NFM} 2019, Houston, TX, USA, May 7-9, 2019, Proceedings},
  editor    = {Badger, Julia M. and Rozier, Kristin Yvonne},
  pages     = {263--279},
  publisher = {Springer},
  title     = {Data Independence for Software Transactional Memory},
  doi       = {10.1007/978-3-030-20652-9_18},
  volume    = {11460},
  year      = {2019},
}

@inproceedings{10092,
  author    = {Doherty, Simon and Dongol, Brijesh and Wehrheim, Heike and Derrick, John},
  booktitle = {Proceedings of the 24th {ACM} {SIGPLAN} Symposium on Principles and Practice of Parallel Programming, PPoPP 2019, Washington, DC, USA, February 16-20, 2019},
  editor    = {Hollingsworth, Jeffrey K. and Keidar, Idit},
  pages     = {355--365},
  publisher = {ACM},
  title     = {Verifying {C11} programs operationally},
  doi       = {10.1145/3293883.3295702},
  year      = {2019},
}

@inproceedings{10093,
  author    = {Beyer, Dirk and Jakobs, Marie-Christine and Lemberger, Thomas and Wehrheim, Heike},
  booktitle = {Software Engineering and Software Management (SE/SWM 2019), Stuttgart, Germany, February 18-22, 2019},
  editor    = {Becker, Steffen and Bogicevic, Ivan and Herzwurm, Georg and Wagner, Stefan},
  pages     = {151--152},
  publisher = {GI},
  title     = {Combining Verifiers in Conditional Model Checking via Reducers},
  doi       = {10.18420/se2019-46},
  volume    = {P-292},
  year      = {2019},
}

@inproceedings{10094,
  author    = {Sharma, Arnab and Wehrheim, Heike},
  booktitle = {Software Engineering and Software Management, {SE/SWM} 2019, Stuttgart, Germany, February 18-22, 2019},
  editor    = {Becker, Steffen and Bogicevic, Ivan and Herzwurm, Georg and Wagner, Stefan},
  pages     = {157--158},
  publisher = {GI},
  title     = {Testing Balancedness of {ML} Algorithms},
  doi       = {10.18420/se2019-48},
  volume    = {P-292},
  year      = {2019},
}

@inproceedings{10095,
  author    = {Richter, Cedric and Wehrheim, Heike},
  booktitle = {Tools and Algorithms for the Construction and Analysis of Systems - 25 Years of {TACAS:} TOOLympics, Held as Part of {ETAPS} 2019, Prague, Czech Republic, April 6-11, 2019, Proceedings, Part {III}},
  editor    = {Beyer, Dirk and Huisman, Marieke and Kordon, Fabrice and Steffen, Bernhard},
  pages     = {229--233},
  publisher = {Springer},
  title     = {{PeSCo}: Predicting Sequential Combinations of Verifiers - (Competition Contribution)},
  doi       = {10.1007/978-3-030-17502-3_19},
  volume    = {11429},
  year      = {2019},
}

@misc{10105,
  author    = {Haltermann, Jan},
  publisher = {Universität Paderborn},
  title     = {Analyzing Data Usage in Array Programs},
  year      = {2019},
}

@inproceedings{10108,
  abstract  = {Recent years have seen the development of numerous tools for the analysis of taint flows in Android apps. Taint analyses aim at detecting data leaks, accidentally or by purpose programmed into apps. Often, such tools specialize in the treatment of specific features impeding precise taint analysis (like reflection or inter-app communication). This multitude of tools, their specific applicability and their various combination options complicate the selection of a tool (or multiple tools) when faced with an analysis instance, even for knowledgeable users, and hence hinders the successful adoption of taint analyses. In this work, we thus present CoDiDroid, a framework for cooperative Android app analysis. CoDiDroid (1) allows users to ask questions about flows in apps in varying degrees of detail, (2) automatically generates subtasks for answering such questions, (3) distributes tasks onto analysis tools (currently DroidRA, FlowDroid, HornDroid, IC3 and two novel tools) and (4) at the end merges tool answers on subtasks into an overall answer. Thereby, users are freed from having to learn about the use and functionality of all these tools while still being able to leverage their capabilities. Moreover, we experimentally show that cooperation among tools pays off with respect to effectiveness, precision and scalability.},
  author    = {Pauck, Felix and Wehrheim, Heike},
  booktitle = {Proceedings of the 2019 27th ACM Joint Meeting on European Software Engineering Conference and Symposium on the Foundations of Software Engineering},
  isbn      = {978-1-4503-5572-8},
  keywords  = {Android Taint Analysis, Cooperation, Precision, Tools},
  pages     = {374--384},
  title     = {Together Strong: Cooperative {Android} App Analysis},
  doi       = {10.1145/3338906.3338915},
  year      = {2019},
}

@inproceedings{13874,
  author    = {Isenberg, Tobias and Jakobs, Marie-Christine and Pauck, Felix and Wehrheim, Heike},
  booktitle = {Tests and Proofs - 13th International Conference, {TAP} 2019, Held as Part of the Third World Congress on Formal Methods 2019, Porto, Portugal, October 9-11, 2019, Proceedings},
  pages     = {3--20},
  title     = {When Are Software Verification Results Valid for Approximate Hardware?},
  doi       = {10.1007/978-3-030-31157-5_1},
  year      = {2019},
}