@inproceedings{35426, author = {{Richter, Cedric and Haltermann, Jan Frederik and Jakobs, Marie-Christine and Pauck, Felix and Schott, Stefan and Wehrheim, Heike}}, booktitle = {{37th IEEE/ACM International Conference on Automated Software Engineering}}, publisher = {{ACM}}, title = {{{Are Neural Bug Detectors Comparable to Software Developers on Variable Misuse Bugs?}}}, doi = {{10.1145/3551349.3561156}}, year = {{2023}}, } @inproceedings{36848, author = {{Schott, Stefan and Pauck, Felix}}, booktitle = {{2022 IEEE 22nd International Working Conference on Source Code Analysis and Manipulation (SCAM)}}, publisher = {{IEEE}}, title = {{{Benchmark Fuzzing for Android Taint Analyses}}}, doi = {{10.1109/scam55253.2022.00007}}, year = {{2022}}, } @inproceedings{35427, author = {{Pauck, Felix}}, booktitle = {{37th IEEE/ACM International Conference on Automated Software Engineering}}, publisher = {{ACM}}, title = {{{Scaling Arbitrary Android App Analyses}}}, doi = {{10.1145/3551349.3561339}}, year = {{2023}}, } @phdthesis{43108, author = {{Pauck, Felix}}, publisher = {{Paderborn University}}, title = {{{Cooperative Android App Analysis}}}, doi = {{10.17619/UNIPB/1-1698}}, year = {{2023}}, } @phdthesis{47833, author = {{König, Jürgen}}, title = {{{On the Membership and Correctness Problem for State Serializability and Value Opacity}}}, year = {{2023}}, } @inproceedings{32590, author = {{Richter, Cedric and Wehrheim, Heike}}, booktitle = {{2022 IEEE Conference on Software Testing, Verification and Validation (ICST)}}, pages = {{162--173}}, title = {{{Learning Realistic Mutations: Bug Creation for Neural Bug Detectors}}}, doi = {{10.1109/ICST53961.2022.00027}}, year = {{2022}}, } @inproceedings{32591, author = {{Richter, Cedric and Wehrheim, Heike}}, booktitle = {{2022 IEEE/ACM 19th International Conference on Mining Software Repositories (MSR)}}, pages = {{418--422}}, title = {{{TSSB-3M: Mining single statement bugs at massive scale}}}, doi = {{10.1145/3524842.3528505}}, year = {{2022}}, } @inproceedings{45248, author = {{Dongol, Brijesh and Schellhorn, Gerhard and Wehrheim, Heike}}, booktitle = {{33rd International Conference on Concurrency Theory, CONCUR 2022, September 12-16, 2022, Warsaw, Poland}}, editor = {{Klin, Bartek and Lasota, Slawomir and Muscholl, Anca}}, pages = {{31:1--31:23}}, publisher = {{Schloss Dagstuhl - Leibniz-Zentrum für Informatik}}, title = {{{Weak Progressive Forward Simulation Is Necessary and Sufficient for Strong Observational Refinement}}}, doi = {{10.4230/LIPIcs.CONCUR.2022.31}}, volume = {{243}}, year = {{2022}}, } @inproceedings{28350, abstract = {{In recent years, we have observed an increasing amount of software with machine learning components being deployed. This poses the question of quality assurance for such components: how can we validate whether specified requirements are fulfilled by machine-learned software? Current testing and verification approaches either focus on a single requirement (e.g., fairness) or specialize in a single type of machine learning model (e.g., neural networks). In this paper, we propose property-driven testing of machine learning models. Our approach MLCheck encompasses (1) a language for property specification, and (2) a technique for systematic test case generation. The specification language is comparable to property-based testing languages. Test case generation employs advanced verification technology for a systematic, property-dependent construction of test suites, without additional user-supplied generator functions.
We evaluate MLCheck using requirements and data sets from three different application areas (software discrimination, learning on knowledge graphs and security). Our evaluation shows that, despite its generality, MLCheck can even outperform specialised testing approaches while having a comparable runtime.}}, author = {{Sharma, Arnab and Demir, Caglar and Ngonga Ngomo, Axel-Cyrille and Wehrheim, Heike}}, booktitle = {{Proceedings of the 20th IEEE International Conference on Machine Learning and Applications (ICMLA)}}, publisher = {{IEEE}}, title = {{{MLCHECK–Property-Driven Testing of Machine Learning Classifiers}}}, year = {{2021}}, } @article{27045, abstract = {{Due to the lack of established real-world benchmark suites for static taint analyses of Android applications, evaluations of these analyses are often restricted and hard to compare. Even in evaluations that do use real-world apps, details about the ground truth in those apps are rarely documented, which makes it difficult to compare and reproduce the results. To push Android taint analysis research forward, this paper thus recommends criteria for constructing real-world benchmark suites for this specific domain, and presents TaintBench, the first real-world malware benchmark suite with documented taint flows. TaintBench benchmark apps include taint flows with complex structures, and address static challenges that are commonly agreed on by the community. Together with the TaintBench suite, we introduce the TaintBench framework, whose goal is to simplify real-world benchmarking of Android taint analyses. First, a usability test shows that the framework improves experts’ performance and perceived usability when documenting and inspecting taint flows. Second, experiments using TaintBench reveal new insights for the taint analysis tools Amandroid and FlowDroid: (i) They are less effective on real-world malware apps than on synthetic benchmark apps. (ii) Predefined lists of sources and sinks heavily impact the tools’ accuracy.
(iii) Surprisingly, up-to-date versions of both tools are less accurate than their predecessors.}}, author = {{Luo, Linghui and Pauck, Felix and Piskachev, Goran and Benz, Manuel and Pashchenko, Ivan and Mory, Martin and Bodden, Eric and Hermann, Ben and Massacci, Fabio}}, issn = {{1382-3256}}, journal = {{Empirical Software Engineering}}, title = {{{TaintBench: Automatic real-world malware benchmarking of Android taint analyses}}}, doi = {{10.1007/s10664-021-10013-5}}, year = {{2021}}, } @misc{22304, author = {{Schott, Stefan}}, title = {{{Android App Analysis Benchmark Case Generation}}}, year = {{2021}}, } @inproceedings{28199, author = {{Pauck, Felix and Wehrheim, Heike}}, booktitle = {{2021 IEEE 21st International Working Conference on Source Code Analysis and Manipulation (SCAM)}}, title = {{{Jicer: Simplifying Cooperative Android App Analysis Tasks}}}, doi = {{10.1109/scam52516.2021.00031}}, year = {{2021}}, } @inproceedings{21238, author = {{Pauck, Felix and Wehrheim, Heike}}, booktitle = {{Software Engineering 2021}}, editor = {{Koziolek, Anne and Schaefer, Ina and Seidl, Christoph}}, pages = {{83--84}}, publisher = {{Gesellschaft für Informatik e.V.}}, title = {{{Cooperative Android App Analysis with CoDiDroid}}}, doi = {{10.18420/SE2021_30}}, year = {{2021}}, } @inproceedings{19656, author = {{Sharma, Arnab and Wehrheim, Heike}}, booktitle = {{Proceedings of the 32nd IFIP International Conference on Testing Software and Systems (ICTSS)}}, publisher = {{Springer}}, title = {{{Automatic Fairness Testing of Machine Learning Models}}}, year = {{2020}}, } @misc{19999, author = {{Mayer, Stefan}}, publisher = {{Universität Paderborn}}, title = {{{Optimierung von JMCTest beim Testen von Inter Method Contracts}}}, year = {{2020}}, } @inproceedings{20274, author = {{Bila, Eleni and Doherty, Simon and Dongol, Brijesh and Derrick, John and Schellhorn, Gerhard and Wehrheim, Heike}}, booktitle = {{Formal Techniques for Distributed Objects, Components, and Systems - 40th {IFIP} {WG} 6.1 International Conference, {FORTE} 2020, Held as Part of the 15th International Federated Conference on Distributed Computing Techniques, DisCoTec 2020, Valletta, Malta, June 15-19, 2020, Proceedings}}, editor = {{Gotsman, Alexey and Sokolova, Ana}}, pages = {{39--58}}, publisher = {{Springer}}, title = {{{Defining and Verifying Durable Opacity: Correctness for Persistent Software Transactional Memory}}}, doi = {{10.1007/978-3-030-50086-3\_3}}, volume = {{12136}}, year = {{2020}}, } @inproceedings{20275, author = {{Beringer, Steffen and Wehrheim, Heike}}, booktitle = {{Proceedings of the 15th International Conference on Software Technologies, {ICSOFT} 2020, Lieusaint, Paris, France, July 7-9, 2020}}, editor = {{van Sinderen, Marten and Fill, Hans{-}Georg and
Maciaszek, Leszek A.}}, pages = {{15--26}}, publisher = {{ScitePress}}, title = {{{Consistency Analysis of AUTOSAR Timing Requirements}}}, doi = {{10.5220/0009766600150026}}, year = {{2020}}, } @inproceedings{20276, author = {{Beyer, Dirk and Wehrheim, Heike}}, booktitle = {{Leveraging Applications of Formal Methods, Verification and Validation: Verification Principles - 9th International Symposium on Leveraging Applications of Formal Methods, ISoLA 2020, Rhodes, Greece, October 20-30, 2020, Proceedings, Part {I}}}, editor = {{Margaria, Tiziana and Steffen, Bernhard}}, pages = {{143--167}}, publisher = {{Springer}}, title = {{{Verification Artifacts in Cooperative Verification: Survey and Unifying Component Framework}}}, doi = {{10.1007/978-3-030-61362-4\_8}}, volume = {{12476}}, year = {{2020}}, } @proceedings{20277, editor = {{Wehrheim, Heike and Cabot, Jordi}}, isbn = {{978-3-030-45233-9}}, publisher = {{Springer}}, title = {{{Fundamental Approaches to Software Engineering - 23rd International Conference, FASE 2020, Held as Part of the European Joint Conferences on Theory and Practice of Software, ETAPS 2020, Dublin, Ireland, April 25-30, 2020, Proceedings}}}, doi = {{10.1007/978-3-030-45234-6}}, volume = {{12076}}, year = {{2020}}, } @proceedings{20278, editor = {{Ahrendt, Wolfgang and Wehrheim, Heike}}, isbn = {{978-3-030-50994-1}}, publisher = {{Springer}}, title = {{{Tests and Proofs - 14th International Conference, TAP@STAF 2020, Bergen, Norway, June 22-23, 2020, Proceedings [postponed]}}}, doi = {{10.1007/978-3-030-50995-8}}, volume = {{12165}}, year = {{2020}}, } @article{20279, author = {{Sharma, Arnab and Wehrheim, Heike}}, journal = {{CoRR}}, title = {{{Testing Monotonicity of Machine Learning Models}}}, volume = {{abs/2002.12278}}, year = {{2020}}, } @article{21016, author = {{Dalvandi, Sadegh and Doherty, Simon and Dongol, Brijesh and Wehrheim, Heike}}, journal = {{Dagstuhl Artifacts Ser.}}, number = {{2}}, pages = {{15:1--15:2}}, title = {{{Owicki-Gries Reasoning for C11 RAR (Artifact)}}}, doi = {{10.4230/DARTS.6.2.15}}, volume = {{6}}, year = {{2020}}, } @inproceedings{21017, author = {{Dalvandi, Sadegh and Doherty, Simon and Dongol, Brijesh and Wehrheim, Heike}}, booktitle = {{34th European Conference on Object-Oriented Programming, {ECOOP} 2020, November 15-17, 2020, Berlin, Germany (Virtual Conference)}}, editor = {{Hirschfeld, Robert and Pape, Tobias}}, pages = {{11:1--11:26}}, publisher = {{Schloss Dagstuhl - Leibniz-Zentrum f{\"{u}}r Informatik}}, title = {{{Owicki-Gries Reasoning for C11 RAR}}}, doi = {{10.4230/LIPIcs.ECOOP.2020.11}}, volume = {{166}}, year = {{2020}}, } @inproceedings{21018, author = {{Richter, Cedric and Wehrheim, Heike}}, booktitle = {{35th {IEEE/ACM} International Conference on Automated Software Engineering, {ASE} 2020, Melbourne, Australia, September 21-25, 2020}}, pages = {{1016--1028}}, publisher = {{{IEEE}}}, title = {{{Attend and Represent: A Novel View on Algorithm Selection for Software Verification}}}, year = {{2020}}, } @unpublished{17825, abstract = {{Software verification has recently made enormous progress due to the development of novel verification methods and the speed-up of supporting
technologies like SMT solving. To keep software verification tools up to date with these advances, tool developers keep on integrating newly designed methods into their tools, almost exclusively by re-implementing the method within their own framework. While this allows for a conceptual re-use of methods, it requires novel implementations for every new technique. In this paper, we employ cooperative verification in order to avoid reimplementation and enable usage of novel tools as black-box components in verification. Specifically, cooperation is employed for the core ingredient of software verification, which is invariant generation. Finding an adequate loop invariant is key to the success of a verification run. Our framework named CoVerCIG allows a master verification tool to delegate the task of invariant generation to one or several specialized helper invariant generators. Their results are then utilized within the verification run of the master verifier, allowing in particular for cross-checking the validity of the invariant. We experimentally evaluate our framework on an instance with two masters and three different invariant generators using a number of benchmarks from SV-COMP 2020. The experiments show that the use of CoVerCIG can increase the number of correctly verified tasks without increasing the resources used.}}, author = {{Haltermann, Jan Frederik and Wehrheim, Heike}}, booktitle = {{arXiv:2008.04551}}, title = {{{Cooperative Verification via Collective Invariant Generation}}}, year = {{2020}}, } @inproceedings{16724, author = {{Sharma, Arnab and Wehrheim, Heike}}, booktitle = {{Proceedings of the ACM SIGSOFT International Symposium on Software Testing and Analysis (ISSTA)}}, publisher = {{ACM}}, title = {{{Higher Income, Larger Loan? Monotonicity Testing of Machine Learning Models}}}, year = {{2020}}, } @article{16725, author = {{Richter, Cedric and Hüllermeier, Eyke and Jakobs, Marie-Christine and Wehrheim, Heike}}, journal = {{Automated Software Engineering}}, publisher = {{Springer}}, title = {{{Algorithm Selection for Software Validation Based on Graph Kernels}}}, year = {{2020}}, } @article{13770, author = {{Karl, Holger and Kundisch, Dennis and Meyer auf der Heide, Friedhelm and Wehrheim, Heike}}, journal = {{Business & Information Systems Engineering}}, number = {{6}}, pages = {{467--481}}, publisher = {{Springer}}, title = {{{A Case for a New IT Ecosystem: On-The-Fly Computing}}}, doi = {{10.1007/s12599-019-00627-x}}, volume = {{62}}, year = {{2020}}, } @inproceedings{16214, author = {{Pauck, Felix and Bodden, Eric and Wehrheim, Heike}}, booktitle = {{Software Engineering 2020, Fachtagung des GI-Fachbereichs Softwaretechnik, 24.-28. Februar 2020, Innsbruck, Austria}}, editor = {{Felderer, Michael and Hasselbring, Wilhelm and Rabiser, Rick and Jung, Reiner}}, pages = {{123--124}}, publisher = {{Gesellschaft f{\"{u}}r Informatik e.V.}}, title = {{{Reproducing Taint-Analysis Results with ReproDroid}}}, doi = {{10.18420/SE2020_36}}, year = {{2020}}, } @inproceedings{3287, abstract = {{For optimal placement and orchestration of network services, it is crucial that their structure and semantics are specified clearly and comprehensively and are available to an orchestrator. Existing specification approaches are either ambiguous or miss important aspects regarding the behavior of virtual network functions (VNFs) forming a service. We propose to formally and unambiguously specify the behavior of these functions and services using Queuing Petri Nets (QPNs).
QPNs are an established method that allows expressing queuing, synchronization, stochastically distributed processing delays, and changing traffic volume and characteristics at each VNF. With QPNs, multiple VNFs can be connected to complete network services in any structure, even specifying bidirectional network services containing loops. We discuss how management and orchestration systems can benefit from our clear and comprehensive specification approach, leading to better placement of VNFs and improved Quality of Service. Another benefit of formally specifying network services with QPNs is the diverse set of analysis options, which allow valuable insights such as the distribution of end-to-end delay. We propose a tool-based workflow that supports the specification of network services and the automatic generation of corresponding simulation code to enable an in-depth analysis of their behavior and performance.}}, author = {{Schneider, Stefan Balthasar and Sharma, Arnab and Karl, Holger and Wehrheim, Heike}}, booktitle = {{2019 IFIP/IEEE International Symposium on Integrated Network Management (IM)}}, location = {{Washington, DC, USA}}, pages = {{116--124}}, publisher = {{IFIP}}, title = {{{Specifying and Analyzing Virtual Network Services Using Queuing Petri Nets}}}, year = {{2019}}, } @inproceedings{7752, author = {{Sharma, Arnab and Wehrheim, Heike}}, booktitle = {{Proceedings of the Software Engineering Conference (SE)}}, isbn = {{978-3-88579-686-2}}, location = {{Stuttgart}}, pages = {{157--158}}, publisher = {{Gesellschaft für Informatik e.V. (GI)}}, title = {{{Testing Balancedness of ML Algorithms}}}, volume = {{P-292}}, year = {{2019}}, } @misc{7623, author = {{Zhang, Shikun}}, pages = {{64}}, publisher = {{Universität Paderborn}}, title = {{{Combining Android Apps for Analysis Purposes}}}, year = {{2019}}, } @inproceedings{7635, author = {{Sharma, Arnab and Wehrheim, Heike}}, booktitle = {{IEEE International Conference on Software Testing, Verification and Validation (ICST)}}, location = {{Xi'an, China, April, 2019}}, pages = {{125--135}}, publisher = {{IEEE}}, title = {{{Testing Machine Learning Algorithms for Balanced Data Usage}}}, year = {{2019}}, } @misc{12885, author = {{Haltermann, Jan Frederik}}, title = {{{Analyzing Data Usage in Array Programs}}}, year = {{2019}}, } @inproceedings{15838, abstract = {{In the field of software analysis, a trade-off between scalability and accuracy always exists. In this respect, Android app analysis is no exception; in particular, analyzing large or many apps can be challenging. Dealing with many small apps is a typical challenge when facing micro-benchmarks such as DROIDBENCH or ICC-BENCH. These particular benchmarks are not only used for the evaluation of novel tools but also in continuous integration pipelines of existing mature tools to maintain and guarantee a certain quality level. Considering this latter usage, it becomes very important to be able to achieve benchmark results as fast as possible. Hence, benchmarks have to be optimized for this purpose. One approach to do so is app merging. We implemented the Android Merge Tool (AMT) following this approach and show that its novel aspects can be used to produce scaled-up and accurate benchmarks. For such benchmarks, Android app analysis tools do not suffer from the scalability-accuracy trade-off anymore. We show this through detailed experiments on DROIDBENCH employing three different analysis tools (AMANDROID, ICCTA, FLOWDROID).
Benchmark execution times are largely reduced without losing benchmark accuracy. Moreover, we argue why AMT is an advantageous successor to the state-of-the-art app merging tool (APKCOMBINER) in analysis lift-up scenarios.}}, author = {{Pauck, Felix and Zhang, Shikun}}, booktitle = {{2019 34th IEEE/ACM International Conference on Automated Software Engineering Workshop (ASEW)}}, isbn = {{9781728141367}}, keywords = {{Program Analysis, Android App Analysis, Taint Analysis, App Merging, Benchmark}}, title = {{{Android App Merging for Benchmark Speed-Up and Analysis Lift-Up}}}, doi = {{10.1109/asew.2019.00019}}, year = {{2019}}, } @inproceedings{16215, author = {{Derrick, John and Doherty, Simon and Dongol, Brijesh and Schellhorn, Gerhard and Wehrheim, Heike}}, booktitle = {{Formal Methods - The Next 30 Years - Third World Congress, {FM} 2019, Porto, Portugal, October 7-11, 2019, Proceedings}}, editor = {{ter Beek, Maurice H. and McIver, Annabelle and Oliveira, Jos{\'{e}} N.}}, pages = {{179--195}}, publisher = {{Springer}}, title = {{{Verifying Correctness of Persistent Concurrent Data Structures}}}, doi = {{10.1007/978-3-030-30942-8\_12}}, volume = {{11800}}, year = {{2019}}, } @article{16216, author = {{Russo, Alessandra and Schürr, Andy and Wehrheim, Heike}}, journal = {{Formal Asp. Comput.}}, number = {{5}}, pages = {{457--458}}, title = {{{Editorial}}}, doi = {{10.1007/s00165-019-00495-y}}, volume = {{31}}, year = {{2019}}, } @article{16217, author = {{Fränzle, Martin and Kapur, Deepak and Wehrheim, Heike and Zhan, Naijun}}, journal = {{Formal Asp. Comput.}}, number = {{1}}, pages = {{1}}, title = {{{Editorial}}}, doi = {{10.1007/s00165-018-00477-6}}, volume = {{31}}, year = {{2019}}, } @inbook{13872, author = {{Beyer, Dirk and Jakobs, Marie-Christine}}, booktitle = {{Fundamental Approaches to Software Engineering}}, isbn = {{9783030167219}}, issn = {{0302-9743}}, title = {{{CoVeriTest: Cooperative Verifier-Based Testing}}}, doi = {{10.1007/978-3-030-16722-6_23}}, year = {{2019}}, } @inproceedings{10091, author = {{König, Jürgen and Wehrheim, Heike}}, booktitle = {{{NASA} Formal Methods - 11th International Symposium, {NFM} 2019, Houston, TX, USA, May 7-9, 2019, Proceedings}}, editor = {{Badger, Julia M. and Rozier, Kristin Yvonne}}, pages = {{263--279}}, publisher = {{Springer}}, title = {{{Data Independence for Software Transactional Memory}}}, doi = {{10.1007/978-3-030-20652-9\_18}}, volume = {{11460}}, year = {{2019}}, } @inproceedings{10092, author = {{Doherty, Simon and Dongol, Brijesh and Wehrheim, Heike and Derrick, John}}, booktitle = {{Proceedings of the 24th {ACM} {SIGPLAN} Symposium on Principles and Practice of Parallel Programming, PPoPP 2019, Washington, DC, USA, February 16-20, 2019}}, editor = {{
Hollingsworth, Jeffrey K. and Keidar, Idit}}, pages = {{355--365}}, publisher = {{{ACM}}}, title = {{{Verifying C11 programs operationally}}}, doi = {{10.1145/3293883.3295702}}, year = {{2019}}, } @inproceedings{10093, author = {{Beyer, Dirk and Jakobs, Marie-Christine and Lemberger, Thomas and Wehrheim, Heike}}, booktitle = {{Software Engineering and Software Management (SE/SWM 2019), Stuttgart, Germany, February 18-22, 2019}}, editor = {{Becker, Steffen and Bogicevic, Ivan and Herzwurm, Georg and Wagner, Stefan}}, pages = {{151--152}}, publisher = {{GI}}, title = {{{Combining Verifiers in Conditional Model Checking via Reducers}}}, doi = {{10.18420/se2019-46}}, volume = {{P-292}}, year = {{2019}}, } @inproceedings{10094, author = {{Sharma, Arnab and Wehrheim, Heike}}, booktitle = {{Software Engineering and Software Management, {SE/SWM} 2019, Stuttgart, Germany, February 18-22, 2019}}, editor = {{Becker, Steffen and Bogicevic, Ivan and Herzwurm, Georg and Wagner, Stefan}}, pages = {{157--158}}, publisher = {{{GI}}}, title = {{{Testing Balancedness of ML Algorithms}}}, doi = {{10.18420/se2019-48}}, volume = {{{P-292}}}, year = {{2019}}, } @inproceedings{10095, author = {{Richter, Cedric and Wehrheim, Heike}}, booktitle = {{Tools and Algorithms for the Construction and Analysis of Systems - 25 Years of {TACAS:} TOOLympics, Held as Part of {ETAPS} 2019, Prague, Czech Republic, April 6-11, 2019, Proceedings, Part {III}}}, editor = {{Beyer, Dirk and Huisman, Marieke and Kordon, Fabrice and Steffen, Bernhard}}, pages = {{229--233}}, publisher = {{Springer}}, title = {{{PeSCo: Predicting Sequential Combinations of Verifiers - (Competition Contribution)}}}, doi = {{10.1007/978-3-030-17502-3_19}}, volume = {{11429}}, year = {{2019}}, } @misc{10105, author = {{Haltermann, Jan}}, publisher = {{Universität Paderborn}}, title = {{{Analyzing Data Usage in Array Programs}}}, year = {{2019}}, } @inproceedings{10108, abstract = {{Recent years have seen the development of numerous tools for the analysis of taint flows in Android apps. Taint analyses aim at detecting data leaks, accidentally or purposely programmed into apps. Often, such tools specialize in the treatment of specific features impeding precise taint analysis (like reflection or inter-app communication). This multitude of tools, their specific applicability and their various combination options complicate the selection of a tool (or multiple tools) when faced with an analysis instance, even for knowledgeable users, and hence hinders the successful adoption of taint analyses. In this work, we thus present CoDiDroid, a framework for cooperative Android app analysis. CoDiDroid (1) allows users to ask questions about flows in apps in varying degrees of detail, (2) automatically generates subtasks for answering such questions, (3) distributes tasks onto analysis tools (currently DroidRA, FlowDroid, HornDroid, IC3 and two novel tools) and (4) at the end merges tool answers on subtasks into an overall answer. Thereby, users are freed from having to learn about the use and functionality of all these tools while still being able to leverage their capabilities.
Moreover, we experimentally show that cooperation among tools pays off with respect to effectiveness, precision and scalability.}}, author = {{Pauck, Felix and Wehrheim, Heike}}, booktitle = {{Proceedings of the 2019 27th ACM Joint Meeting on European Software Engineering Conference and Symposium on the Foundations of Software Engineering}}, isbn = {{978-1-4503-5572-8}}, keywords = {{Android Taint Analysis, Cooperation, Precision, Tools}}, pages = {{374--384}}, title = {{{Together Strong: Cooperative Android App Analysis}}}, doi = {{10.1145/3338906.3338915}}, year = {{2019}}, } @inproceedings{13874, author = {{Isenberg, Tobias and Jakobs, Marie-Christine and Pauck, Felix and Wehrheim, Heike}}, booktitle = {{Tests and Proofs - 13th International Conference, {TAP} 2019, Held as Part of the Third World Congress on Formal Methods 2019, Porto, Portugal, October 9-11, 2019, Proceedings}}, pages = {{3--20}}, title = {{{When Are Software Verification Results Valid for Approximate Hardware?}}}, doi = {{10.1007/978-3-030-31157-5_1}}, year = {{2019}}, } @misc{3320, author = {{Rautenberg, Kai}}, publisher = {{Universität Paderborn}}, title = {{{Korrektheitsbeweise für Muster von Servicekompositionen}}}, year = {{2018}}, } @inproceedings{3414, abstract = {{Over the years, Design by Contract (DbC) has evolved into a powerful concept for program documentation, testing, and verification. Contracts formally specify assertions on (mostly) object-oriented programs: pre- and postconditions of methods, class invariants, allowed call orders, etc. Missing in the long list of properties specifiable by contracts are, however, method correlations: DbC languages fall short of stating assertions relating methods. In this paper, we propose the novel concept of inter-method contract, allowing precisely for expressing method correlations. We present JMC as a language for specifying and JMCTest as a tool for dynamically checking inter-method contracts on Java programs. JMCTest fully automatically generates objects on which the contracted methods are called and the validity of the contract is checked. Using JMCTest, we detected that large Java code bases (e.g. JBoss, Java RT) frequently violate standard inter-method contracts.
In comparison to other verification tools inspecting (some) inter-method contracts, JMCTest can find bugs that remain undetected by those tools.}}, author = {{Börding, Paul and Haltermann, Jan Frederik and Jakobs, Marie-Christine and Wehrheim, Heike}}, booktitle = {{Proceedings of the IFIP International Conference on Testing Software and Systems (ICTSS 2018)}}, location = {{Cádiz, Spain}}, pages = {{39--55}}, publisher = {{Springer}}, title = {{{JMCTest: Automatically Testing Inter-Method Contracts in Java}}}, volume = {{11146}}, year = {{2018}}, } @inbook{3536, author = {{Schellhorn, Gerhard and Wedel, Monika and Travkin, Oleg and König, Jürgen and Wehrheim, Heike}}, booktitle = {{Software Engineering and Formal Methods}}, isbn = {{9783319929699}}, issn = {{0302-9743}}, pages = {{105--120}}, publisher = {{Springer International Publishing}}, title = {{{FastLane Is Opaque – a Case Study in Mechanized Proofs of Opacity}}}, doi = {{10.1007/978-3-319-92970-5_7}}, year = {{2018}}, } @article{3153, author = {{Doherty, Simon and Derrick, John and Dongol, Brijesh and Wehrheim, Heike}}, journal = {{CoRR}}, title = {{{Causal Linearizability: Compositionality for Partially Ordered Executions}}}, year = {{2018}}, } @unpublished{2711, abstract = {{In recent years, researchers have developed a number of tools to conduct taint analysis of Android applications. While all the respective papers aim at providing a thorough empirical evaluation, comparability is hindered by varying or unclear evaluation targets. Sometimes, the apps used for evaluation are not precisely described. In other cases, authors use an established benchmark but cover it only partially. In yet other cases, the evaluations differ in terms of the data leaks searched for, or lack a ground truth to compare against. All those limitations make it impossible to truly compare the tools based on those published evaluations. We thus present ReproDroid, a framework allowing the accurate comparison of Android taint analysis tools. ReproDroid supports researchers in inferring the ground truth for data leaks in apps, in automatically applying tools to benchmarks, and in evaluating the obtained results. We use ReproDroid to comparatively evaluate on equal grounds the six prominent taint analysis tools Amandroid, DIALDroid, DidFail, DroidSafe, FlowDroid and IccTA. The results are largely positive, although four tools violate some promises concerning features and accuracy. Finally, we contribute to the area of unbiased benchmarking with a new and improved version of the open test suite DroidBench.}}, author = {{Pauck, Felix and Bodden, Eric and Wehrheim, Heike}}, booktitle = {{arXiv:1804.02903}}, title = {{{Do Android Taint Analysis Tools Keep their Promises?}}}, year = {{2018}}, } @inproceedings{5774, abstract = {{Information flow analysis investigates the flow of data in applications, checking in particular for flows from private sources to public sinks. Flow- and path-sensitive analyses are, however, often too costly to be performed every time a security-critical application is run. In this paper, we propose a variant of proof-carrying code for information flow security. To this end, we develop information flow (IF) certificates which get attached to programs as well as a method for IF certificate validation. We prove soundness of our technique, i.e., show it to be tamper-free. The technique is implemented within the program analysis tool CPAchecker.
Our experiments confirm that the use of certificates pays off for costly analysis runs.}}, author = {{Töws, Manuel and Wehrheim, Heike}}, booktitle = {{Theoretical Aspects of Computing – ICTAC 2018}}, isbn = {{9783030025076}}, issn = {{0302-9743}}, pages = {{435--454}}, publisher = {{Springer International Publishing}}, title = {{{Information Flow Certificates}}}, doi = {{10.1007/978-3-030-02508-3_23}}, year = {{2018}}, } @inproceedings{4999, author = {{Pauck, Felix and Bodden, Eric and Wehrheim, Heike}}, booktitle = {{Proceedings of the 2018 26th ACM Joint Meeting on European Software Engineering Conference and Symposium on the Foundations of Software Engineering - ESEC/FSE 2018}}, isbn = {{9781450355735}}, publisher = {{ACM Press}}, title = {{{Do Android taint analysis tools keep their promises?}}}, doi = {{10.1145/3236024.3236029}}, year = {{2018}}, } @article{6828, author = {{Derrick, John and Doherty, Simon and Dongol, Brijesh and Schellhorn, Gerhard and Travkin, Oleg and Wehrheim, Heike}}, journal = {{Formal Asp. Comput.}}, number = {{5}}, pages = {{597--625}}, title = {{{Mechanized proofs of opacity: a comparison of two techniques}}}, doi = {{10.1007/s00165-017-0433-3}}, volume = {{30}}, year = {{2018}}, } @inproceedings{6836, author = {{Doherty, Simon and Dongol, Brijesh and Wehrheim, Heike and Derrick, John}}, booktitle = {{Integrated Formal Methods - 14th International Conference, {IFM} 2018, Maynooth, Ireland, September 5-7, 2018, Proceedings}}, pages = {{110--129}}, title = {{{Making Linearizability Compositional for Partially Ordered Executions}}}, doi = {{10.1007/978-3-319-98938-9\_7}}, year = {{2018}}, } @inproceedings{6839, author = {{Doherty, Simon and Dongol, Brijesh and Wehrheim, Heike and Derrick, John}}, booktitle = {{32nd International Symposium on Distributed Computing, {DISC} 2018, New Orleans, LA, USA, October 15-19, 2018}}, pages = {{45:1--45:3}}, title = {{{Brief Announcement: Generalising Concurrent Correctness to Weak Memory}}}, doi = {{10.4230/LIPIcs.DISC.2018.45}}, year = {{2018}}, } @article{1043, abstract = {{Approximate computing (AC) is an emerging paradigm for energy-efficient computation. The basic idea of AC is to sacrifice high precision for low energy by allowing hardware to carry out “approximately correct” calculations. This provides a major challenge for software quality assurance: programs successfully verified to be correct might be erroneous on approximate hardware. In this letter, we present a novel approach for determining under what conditions a software verification result is valid for approximate hardware. To this end, we compute the allowed tolerances for AC hardware from successful verification runs. More precisely, we derive a set of constraints which—when met by the AC hardware—guarantees the verification result to carry over to AC. On the practical side, we furthermore: 1) show how to extract tolerances from verification runs employing predicate abstraction as verification technology and 2) show how to check such constraints on hardware designs.
We have implemented all techniques and exemplify them on example C programs and a number of recently proposed approximate adders.}}, author = {{Isenberg, Tobias and Jakobs, Marie-Christine and Pauck, Felix and Wehrheim, Heike}}, issn = {{1943-0663}}, journal = {{IEEE Embedded Systems Letters}}, pages = {{22--25}}, publisher = {{Institute of Electrical and Electronics Engineers (IEEE)}}, title = {{{Validity of Software Verification Results on Approximate Hardware}}}, doi = {{10.1109/LES.2017.2758200}}, year = {{2018}}, } @inproceedings{1096, author = {{Beyer, Dirk and Jakobs, Marie-Christine and Lemberger, Thomas and Wehrheim, Heike}}, booktitle = {{Proceedings of the 40th International Conference on Software Engineering (ICSE)}}, location = {{Gothenburg, Sweden}}, pages = {{1182--1193}}, publisher = {{ACM}}, title = {{{Reducer-Based Construction of Conditional Verifiers}}}, year = {{2018}}, } @misc{3512, author = {{Börding, Paul}}, publisher = {{Universität Paderborn}}, title = {{{Testing Java Method Contracts}}}, year = {{2017}}, } @inproceedings{3155, author = {{Töws, Manuel and Wehrheim, Heike}}, booktitle = {{Formal Methods and Software Engineering - 19th International Conference on Formal Engineering Methods, {ICFEM} 2017, Xi'an, China, November 13-17, 2017, Proceedings}}, editor = {{Duan, Zhenhua and Ong, Luke}}, pages = {{362--378}}, title = {{{Policy Dependent and Independent Information Flow Analyses}}}, doi = {{10.1007/978-3-319-68690-5_22}}, year = {{2017}}, } @inproceedings{3156, author = {{König, Jürgen and Wehrheim, Heike}}, booktitle = {{Theoretical Aspects of Computing - {ICTAC} 2017 - 14th International Colloquium, Hanoi, Vietnam, October 23-27, 2017, Proceedings}}, editor = {{Van Hung, Dang and Kapur, Deepak}}, pages = {{118--135}}, title = {{{Value-Based or Conflict-Based? Opacity Definitions for STMs}}}, doi = {{10.1007/978-3-319-67729-3_8}}, year = {{2017}}, } @inproceedings{114, abstract = {{Proof witnesses are proof artifacts showing correctness of programs wrt. safety properties. The recent past has seen a rising interest in witnesses as (a) proofs in a proof-carrying-code context, (b) certificates for the correct functioning of verification tools, or simply (c) exchange formats for (partial) verification results. As witnesses in all these scenarios need to be stored and processed, witnesses are required to be as small as possible. However, software verification tools – the prime suppliers of witnesses – do not necessarily construct small witnesses. In this paper, we present a formal account of proof witnesses. We introduce the concept of weakenings, reducing the complexity of proof witnesses while preserving the ability of witnessing safety. We develop a weakening technique for a specific class of program analyses, and prove it to be sound. Finally, we experimentally demonstrate our weakening technique to indeed achieve a size reduction of proof witnesses.}}, author = {{Jakobs, Marie-Christine and Wehrheim, Heike}}, booktitle = {{NASA Formal Methods: 9th International Symposium}}, editor = {{Barrett, Clark and Davies, Misty and Kahsai, Temesghen}}, pages = {{389--403}}, title = {{{Compact Proof Witnesses}}}, doi = {{10.1007/978-3-319-57288-8_28}}, year = {{2017}}, } @inproceedings{115, abstract = {{Whenever customers have to decide between different instances of the same product, they are interested in buying the best product.
In contrast, companies are interested in reducing the construction effort (and usually as a consequence thereof, the quality) to gain profit. The described setting is widely known as opposed preferences in quality of the product and also applies to the context of service-oriented computing. In general, service-oriented computing emphasizes the construction of large software systems out of existing services, where services are small and self-contained pieces of software that adhere to a specified interface. Several implementations of the same interface are considered as several instances of the same service. Thereby, customers are interested in buying the best service implementation for their service composition wrt. metrics, such as costs, energy, memory consumption, or execution time. One way to ensure the service quality is to employ certificates, which can come in different kinds: Technical certificates proving correctness can be automatically constructed by the service provider and again be automatically checked by the user. Digital certificates allow proof of the integrity of a product. Other certificates might be rolled out if service providers follow a good software construction principle, which is checked in annual audits. Whereas all of these certificates are handled differently in service markets, what they have in common is that they influence the buying decisions of customers. In this paper, we review state-of-the-art developments in certification with respect to service-oriented computing. We not only discuss how certificates are constructed and handled in service-oriented computing but also review the effects of certificates on the market from an economic perspective.}}, author = {{Jakobs, Marie-Christine and Krämer, Julia and van Straaten, Dirk and Lettmann, Theodor}}, booktitle = {{The Ninth International Conferences on Advanced Service Computing (SERVICE COMPUTATION)}}, editor = {{De Barros, Marcelo and Klink, Janusz and Uhl, Tadeus and Prinz, Thomas}}, pages = {{7--12}}, title = {{{Certification Matters for Service Markets}}}, year = {{2017}}, } @article{90, abstract = {{We propose and extend an approach for the verification of safety properties for parameterized timed systems modeled as networks of timed automata. For this task, we introduce an incremental workflow that is based on our algorithm IC3 with Zones. It proceeds in a cycle in which single models of the system are verified, and the verification results are employed for reasoning about the entire system. Starting with the smallest instances, the verification of the safety property is carried out fast and efficiently. On successful verification, the algorithm produces an inductive strengthening of the safety property. We reuse this result and try to reason about the entire parameterized timed system. To this end, we extrapolate the inductive strengthening into a candidate for the next-larger model. In case this candidate is a valid inductive strengthening for the next larger model, our main theorem reasons about all models of the parameterized timed system, stating that the safety property holds true for all models. Otherwise, the main cycle starts over with the verification of the next larger model. This workflow is iterated until we are able to reason about the entire parameterized timed system, until a counterexample trace is found, or until the single models become too large to be handled in the verification.
We reuse the intermediate results in a feedback loop in order to accelerate the verification runs for the single models. Furthermore, we consider an extended formalism in comparison to our previous publications.}}, author = {{Isenberg, Tobias}}, journal = {{ACM Transactions on Embedded Computing Systems}}, number = {{2}}, pages = {{47:1--47:24}}, publisher = {{ACM}}, title = {{{Incremental Inductive Verification of Parameterized Timed Systems}}}, doi = {{10.1145/2984640}}, year = {{2017}}, } @inproceedings{5769, abstract = {{Information Flow Analysis (IFA) aims at detecting illegal flows of information between program entities. “Legality” is therein specified in terms of various security policies. For the analysis, this opens up two possibilities: building generic, policy-independent and building specific, policy-dependent IFAs. While the former needs to track all dependencies between program entities, the latter allows for a reduced and thus more efficient analysis. In this paper, we start out by formally defining a policy-independent information flow analysis. Next, we show how to specialize this IFA via policy-specific variable tracking, and prove soundness of the specialization. We furthermore investigate refinement relationships between policies, allowing an IFA for one policy to be employed for its refinements. As policy refinement depends on concrete program entities, we additionally propose a precomputation of policy refinement conditions, enabling an efficient refinement check for concrete programs.}}, author = {{Töws, Manuel and Wehrheim, Heike}}, booktitle = {{Formal Methods and Software Engineering - 19th International Conference on Formal Engineering Methods (ICFEM 2017)}}, isbn = {{9783319686899}}, issn = {{0302-9743}}, pages = {{362--378}}, publisher = {{Springer International Publishing}}, title = {{{Policy Dependent and Independent Information Flow Analyses}}}, doi = {{10.1007/978-3-319-68690-5_22}}, year = {{2017}}, } @phdthesis{707, author = {{Walther, Sven}}, publisher = {{Universität Paderborn}}, title = {{{Knowledge-based Verification of Service Compositions}}}, doi = {{10.17619/UNIPB/1-307}}, year = {{2017}}, } @inproceedings{71, abstract = {{Today, software verification tools have reached the maturity to be used for large-scale programs. Different tools perform differently well on varying code. A software developer is hence faced with the problem of choosing a tool appropriate for her program at hand. A ranking of tools on programs could facilitate the choice. Such rankings can, however, so far only be obtained by running all considered tools on the program. In this paper, we present a machine learning approach to predicting rankings of tools on programs. The method builds upon so-called label ranking algorithms, which we complement with appropriate kernels providing a similarity measure for programs. Our kernels employ a graph representation for software source code that mixes elements of control flow and program dependence graphs with abstract syntax trees.
Using data sets from the software verification competition SV-COMP, we demonstrate our rank prediction technique to generalize well and achieve a rather high predictive accuracy (rank correlation > 0.6).}}, author = {{Czech, Mike and Hüllermeier, Eyke and Jakobs, Marie-Christine and Wehrheim, Heike}}, booktitle = {{Proceedings of the 3rd International Workshop on Software Analytics}}, pages = {{23--26}}, title = {{{Predicting Rankings of Software Verification Tools}}}, doi = {{10.1145/3121257.3121262}}, year = {{2017}}, } @techreport{72, abstract = {{Software verification competitions, such as the annual SV-COMP, evaluate software verification tools with respect to their effectiveness and efficiency. Typically, the outcome of a competition is a (possibly category-specific) ranking of the tools. For many applications, such as building portfolio solvers, it would be desirable to have an idea of the (relative) performance of verification tools on a given verification task beforehand, i.e., prior to actually running all tools on the task. In this paper, we present a machine learning approach to predicting rankings of tools on verification tasks. The method builds upon so-called label ranking algorithms, which we complement with appropriate kernels providing a similarity measure for verification tasks. Our kernels employ a graph representation for software source code that mixes elements of control flow and program dependence graphs with abstract syntax trees. Using data sets from SV-COMP, we demonstrate our rank prediction technique to generalize well and achieve a rather high predictive accuracy. In particular, our method outperforms a recently proposed feature-based approach of Demyanova et al. (when applied to rank predictions).}}, author = {{Czech, Mike and Hüllermeier, Eyke and Jakobs, Marie-Christine and Wehrheim, Heike}}, title = {{{Predicting Rankings of Software Verification Competitions}}}, year = {{2017}}, } @article{68, abstract = {{Proof-carrying hardware (PCH) is a principle for achieving safety for dynamically reconfigurable hardware systems. The producer of a hardware module spends huge effort when creating a proof for a safety policy. The proof is then transferred as a certificate together with the configuration bitstream to the consumer of the hardware module, who can quickly verify the given proof. Previous work utilized SAT solvers and resolution traces to set up a PCH technology and corresponding tool flows. In this article, we present a novel technology for PCH based on inductive invariants. For sequential circuits, our approach is fundamentally stronger than the previous SAT-based one since we avoid the limitations of bounded unrolling. We contrast our technology to existing ones and show that it fits into previously proposed tool flows. We conduct experiments with four categories of benchmark circuits and report consumer and producer runtime and peak memory consumption, as well as the size of the certificates and the distribution of the workload between producer and consumer.
Experiments clearly show that our new induction-based technology is superior for sequential circuits, whereas the previous SAT-based technology is the better choice for combinational circuits.}}, author = {{Isenberg, Tobias and Platzner, Marco and Wehrheim, Heike and Wiersema, Tobias}}, journal = {{ACM Transactions on Design Automation of Electronic Systems}}, number = {{4}}, pages = {{61:1--61:23}}, publisher = {{ACM}}, title = {{{Proof-Carrying Hardware via Inductive Invariants}}}, doi = {{10.1145/3054743}}, year = {{2017}}, } @phdthesis{685, author = {{Jakobs, Marie-Christine}}, publisher = {{Universität Paderborn}}, title = {{{On-The-Fly Safety Checking - Customizing Program Certification and Program Restructuring}}}, doi = {{10.17619/UNIPB/1-104}}, year = {{2017}}, } @article{69, abstract = {{Today, software is traded worldwide on global markets, with apps being downloaded to smartphones within minutes or seconds. This poses, more than ever, the challenge of ensuring safety of software in the face of (1) unknown or untrusted software providers together with (2) resource-limited software consumers. The concept of Proof-Carrying Code (PCC), suggested years ago by Necula, provides one framework for securing the execution of untrusted code. PCC techniques attach safety proofs, constructed by software producers, to code. Based on the assumption that checking proofs is usually much simpler than constructing proofs, software consumers should thus be able to quickly check the safety of software. However, PCC techniques often suffer from the size of certificates (i.e., the attached proofs), making PCC techniques inefficient in practice. In this article, we introduce a new framework for the safe execution of untrusted code called Programs from Proofs (PfP). The basic assumption underlying the PfP technique is the fact that the structure of programs significantly influences the complexity of checking a specific safety property. Instead of attaching proofs to program code, the PfP technique transforms the program into an efficiently checkable form, thus guaranteeing quick safety checks for software consumers. For this transformation, the technique also uses a producer-side automatic proof of safety. More specifically, safety proving for the software producer proceeds via the construction of an abstract reachability graph (ARG) unfolding the control-flow automaton (CFA) up to the degree necessary for simple checking. To this end, we combine different sorts of software analysis: expensive analyses incrementally determining the degree of unfolding, and cheap analyses responsible for safety checking. Out of the abstract reachability graph we generate the new program. In its CFA structure, it is isomorphic to the graph and hence another, this time consumer-side, cheap analysis can quickly determine its safety. Like PCC, Programs from Proofs is a general framework instantiable with different sorts of (expensive and cheap) analysis. Here, we present the general framework and exemplify it by some concrete examples.
We have implemented different instantiations on top of the configurable program analysis tool CPAchecker and report on experiments, in particular on comparisons with PCC techniques.}}, author = {{Jakobs, Marie-Christine and Wehrheim, Heike}}, journal = {{ACM Transactions on Programming Languages and Systems}}, number = {{2}}, pages = {{7:1--7:56}}, publisher = {{ACM}}, title = {{{Programs from Proofs: A Framework for the Safe Execution of Untrusted Software}}}, doi = {{10.1145/3014427}}, year = {{2017}}, } @misc{109, author = {{Pauck, Felix}}, publisher = {{Universität Paderborn}}, title = {{{Cooperative static analysis of Android applications}}}, year = {{2017}}, } @misc{201, author = {{Bröcher, Henrik}}, publisher = {{Universität Paderborn}}, title = {{{Evaluation von Graphpartitionierungsalgorithmen im Kontext von Konfigurierbarer Softwarezertifizierung}}}, year = {{2016}}, } @inproceedings{3157, author = {{Beringer, Steffen and Wehrheim, Heike}}, booktitle = {{Critical Systems: Formal Methods and Automated Verification - Joint 21st International Workshop on Formal Methods for Industrial Critical Systems and 16th International Workshop on Automated Verification of Critical Systems, FMICS-AVoCS 2016, Pisa, Italy, September 26-28, 2016, Proceedings}}, editor = {{ter Beek, Maurice H. and Gnesi, Stefania and Knapp, Alexander}}, pages = {{189--204}}, title = {{{Verification of AUTOSAR Software Architectures with Timed Automata}}}, doi = {{10.1007/978-3-319-45943-1_13}}, year = {{2016}}, } @inproceedings{3158, author = {{Travkin, Oleg and Wehrheim, Heike}}, booktitle = {{Theoretical Aspects of Computing - {ICTAC} 2016 - 13th International Colloquium, Taipei, Taiwan, ROC, October 24-31, 2016, Proceedings}}, editor = {{Sampaio, Augusto and Wang, Farn}}, pages = {{3--24}}, title = {{{Verification of Concurrent Programs on Weak Memory Models}}}, doi = {{10.1007/978-3-319-46750-4_1}}, year = {{2016}}, } @inproceedings{3159, author = {{Schellhorn, Gerhard and Travkin, Oleg and Wehrheim, Heike}}, booktitle = {{Integrated Formal Methods - 12th International Conference, {IFM} 2016, Reykjavik, Iceland, June 1-5, 2016, Proceedings}}, editor = {{Huisman, Marieke}}, pages = {{193--209}}, title = {{{Towards a Thread-Local Proof Technique for Starvation Freedom}}}, doi = {{10.1007/978-3-319-33693-0_13}}, year = {{2016}}, } @inproceedings{3160, author = {{Doherty, Simon and Dongol, Brijesh and Derrick, John and Schellhorn, Gerhard and Wehrheim, Heike}}, booktitle = {{20th International Conference on Principles of Distributed Systems, {OPODIS} 2016, December 13-16, 2016, Madrid, Spain}}, editor = {{Fatourou, Panagiota and Jim{\'{e}}nez, Ernesto and Pedone, Fernando}}, pages = {{35:1--35:17}}, title = {{{Proving Opacity of a Pessimistic {STM}}}}, doi = {{10.4230/LIPIcs.OPODIS.2016.35}}, year = {{2016}}, } @article{3161, author = {{Isenberg, Tobias and Jakobs, Marie{-}Christine and Pauck, Felix and Wehrheim, Heike}}, journal = {{CoRR}}, title = {{{Deriving approximation tolerance constraints from verification runs}}}, year = {{2016}}, } @article{175, abstract = {{Today, service compositions often need to be assembled or changed on-the-fly, which leaves little time for quality assurance. Moreover, quality assurance is complicated by service providers only giving information on their services in terms of domain-specific concepts with only limited semantic meaning. In this paper, we propose a method for constructing service compositions based on pre-verified templates.
Templates, given as workflow descriptions, are typed over a (domain-independent) template ontology defining concepts and predicates. Their meaning is defined by an abstract semantics, leaving the specific meaning of ontology concepts open, however, only up to given ontology rules. Templates are proven correct using a Hoare-style proof calculus, extended by a specific rule for service calls. Construction of service compositions amounts to instantiation of templates with domain-specific services. Correctness of an instantiation can then simply be checked by verifying that the domain ontology (a) adheres to the rules of the template ontology, and (b) fulfills the constraints of the employed template.}}, author = {{Walther, Sven and Wehrheim, Heike}}, journal = {{Science of Computer Programming}}, pages = {{2--23}}, publisher = {{Elsevier}}, title = {{{On-The-Fly Construction of Provably Correct Service Compositions - Templates and Proofs}}}, doi = {{10.1016/j.scico.2016.04.002}}, year = {{2016}}, } @inproceedings{186, abstract = {{Software verification is an established method to ensure software safety. Nevertheless, verification still often fails, either because it consumes too many resources, e.g., time or memory, or because the technique is not mature enough to verify the property. Often, the partial verification is then discarded and the validation process proceeds with techniques like testing. To enable standard testing to profit from previous, partial verification, we use a summary of the verification effort to simplify the program for subsequent testing. Our techniques use this summary to construct a residual program which only contains program paths with unproven assertions. Afterwards, the residual program can be used with standard testing tools. Our first experiments show that testing profits from the partial verification. The test effort is reduced and combined verification and testing is faster than a complete verification.}}, author = {{Czech, Mike and Jakobs, Marie-Christine and Wehrheim, Heike}}, booktitle = {{Software Engineering 2016}}, editor = {{Knoop, Jens and Zdun, Uwe}}, pages = {{17--18}}, title = {{{Just test what you cannot verify!}}}, year = {{2016}}, } @inproceedings{224, abstract = {{In modern software development, paradigms like component-based software engineering (CBSE) and service-oriented architectures (SOA) emphasize the construction of large software systems out of existing components or services. Therein, a service is a self-contained piece of software, which adheres to a specified interface. In a model-based software design, this interface constitutes our sole knowledge of the service at design time, while service implementations are not available. Therefore, correctness checks or detection of potential errors in service compositions has to be carried out without the possibility of executing services. This challenges the usage of standard software error localization techniques for service compositions.
In this paper, we review state-of-the-art approaches for error localization of software and discuss their applicability to service compositions.}}, author = {{Krämer, Julia and Wehrheim, Heike}}, booktitle = {{Proceedings of the 5th European Conference on Service-Oriented and Cloud Computing (ESOCC 2016)}}, pages = {{248--262}}, title = {{{A short survey on using software error localization for service compositions}}}, doi = {{10.1007/978-3-319-44482-6_16}}, year = {{2016}}, } @inproceedings{226, abstract = {{Error detection, localization and correction are time-intensive tasks in software development, but crucial to deliver functionally correct products. Thus, automated approaches to these tasks have been intensively studied for standard software systems. For model-based software systems, the situation is different. While error detection is still well-studied, error localization and correction is a less-studied domain. In this paper, we examine error localization and correction for models of service compositions. Based on formal definitions of error and correction in this context, we show that the classical approach to error localization and correction, i.e., first determining a set of suspicious statements and then proposing changes to these statements, is ineffective in our context. In fact, it lessens the chance to succeed in finding a correction at all. We therefore introduce correction proposals as a novel approach to error correction in service compositions, integrating error localization and correction in one combined step. In addition, we provide an algorithm to compute such correction proposals automatically.}}, author = {{Krämer, Julia and Wehrheim, Heike}}, booktitle = {{Proceedings of the 1st International Workshop on Formal to Practical Software Verification and Composition (VeryComp 2016)}}, pages = {{445--457}}, title = {{{A Formal Approach to Error Localization and Correction in Service Compositions}}}, doi = {{10.1007/978-3-319-50230-4_35}}, year = {{2016}}, } @inproceedings{227, abstract = {{Information flow analysis studies the flow of data between program entities (e.g., variables), where the allowed flow is specified via security policies. Typical information flow analyses compute a conservative (over-)approximation of the flows in a program. Such an analysis may thus signal non-existing violations of the security policy. In this paper, we propose a new technique for inspecting the reported violations (counterexamples) for spuriousness. Similar to counterexample-guided abstraction refinement (CEGAR) in software verification, we use the result of this inspection to improve the next round of the analysis. We prove soundness of this scheme.}}, author = {{Töws, Manuel and Wehrheim, Heike}}, booktitle = {{Proceedings of the 18th International Conference on Formal Engineering Methods (ICFEM 2016)}}, pages = {{466--483}}, title = {{{A CEGAR Scheme for Information Flow Analysis}}}, doi = {{10.1007/978-3-319-47846-3_29}}, year = {{2016}}, } @inproceedings{170, abstract = {{We present PAndA2, an extendable static analysis tool for Android apps which examines permission-related security threats like overprivilege, permission redelegation, and permission flows.
PAndA2 comes with a textual and graphical visualization of the analysis results and also supports comparing analysis results across different Android app versions.}}, author = {{Jakobs, Marie-Christine and Töws, Manuel and Pauck, Felix}}, booktitle = {{Workshop on Formal and Model-Driven Techniques for Developing Trustworthy Systems}}, editor = {{Ishikawa, F. and Romanovsky, A. and Troubitsyna, E.}}, title = {{{PAndA2: Analyzing Permission Use and Interplay in Android Apps (Tool Paper)}}}, year = {{2016}}, } @phdthesis{1190, author = {{Isenberg, Tobias}}, publisher = {{Universität Paderborn}}, title = {{{Induction-based Verification of Timed Systems}}}, year = {{2016}}, } @misc{162, author = {{Zhang, Guangli}}, publisher = {{Universität Paderborn}}, title = {{{Program Slicing: A Way of Separating WHILE Programs into Precise and Approximate Portions}}}, year = {{2016}}, } @misc{164, author = {{Czech, Mike}}, publisher = {{Universität Paderborn}}, title = {{{Predicting Rankings of Software Verification Tools Using Kernels for Structured Data}}}, year = {{2016}}, } @misc{133, author = {{Dewender, Markus}}, publisher = {{Universität Paderborn}}, title = {{{Verifikation von Service Kompositionen mit Spin}}}, year = {{2016}}, } @misc{134, author = {{Heinisch, Philipp}}, publisher = {{Universität Paderborn}}, title = {{{Verifikation von Service Kompositionen mit Prolog}}}, year = {{2016}}, } @inproceedings{250, abstract = {{Before execution, users should formally validate the correctness of software received from untrusted providers. To accelerate this validation, in the proof-carrying code (PCC) paradigm the provider delivers the software together with a certificate, a formal proof of the software’s correctness. Thus, the user only checks if the attached certificate shows correctness of the delivered software. Recently, we introduced configurable program certification, a generic, PCC-based framework supporting various software analyses and safety properties. Evaluation of our framework revealed that validation suffers from certificate reading. In this paper, we present two orthogonal approaches which improve certificate validation, both reducing the impact of certificate reading. The first approach reduces the certificate size, storing information only if it cannot easily be recomputed. The second approach partitions the certificate into independently checkable parts. The trick is to read parts of the certificate while already checking the parts read so far. Our experiments show that validation highly benefits from our improvements.}}, author = {{Jakobs, Marie-Christine}}, booktitle = {{Proceedings of the 13th International Conference on Software Engineering and Formal Methods (SEFM)}}, pages = {{159--174}}, title = {{{Speed Up Configurable Certificate Validation by Certificate Reduction and Partitioning}}}, doi = {{10.1007/978-3-319-22969-0_12}}, year = {{2015}}, } @inproceedings{283, abstract = {{Today, software verification is an established analysis method which can provide high guarantees for software safety. However, the resources (time and/or memory) for an exhaustive verification are not always available, and analysis then has to resort to other techniques, like testing. Most often, the already achieved partial verification results are discarded in this case, and testing has to start from scratch. In this paper, we propose a method for combining verification and testing in which testing only needs to check the residual fraction of an uncompleted verification.
To this end, the partial results of a verification run are used to construct a residual program (and residual assertions to be checked on it). The residual program can afterwards be fed into standard testing tools. The proposed technique is sound modulo the soundness of the testing procedure. Experimental results show that this combined usage of verification and testing can significantly reduce the effort for the subsequent testing.}}, author = {{Czech, Mike and Jakobs, Marie-Christine and Wehrheim, Heike}}, booktitle = {{Fundamental Approaches to Software Engineering}}, editor = {{Egyed, Alexander and Schaefer, Ina}}, pages = {{100--114}}, title = {{{Just test what you cannot verify!}}}, doi = {{10.1007/978-3-662-46675-9_7}}, year = {{2015}}, } @inproceedings{285, abstract = {{We propose an incremental workflow for the verification of parameterized systems modeled as symmetric networks of timed automata. Starting with a small number of timed automata in the network, a safety property is verified using IC3, a state-of-the-art algorithm based on induction. The result of the verification, an inductive strengthening, is reused to propose a candidate inductive strengthening for a larger network. If the candidate is valid, our main theorem states that the safety property holds for all sizes of the network of timed automata. Otherwise, the number of automata is increased and the next iteration is started with a new run of IC3. We propose and thoroughly examine optimizations to our workflow, e.g., feedback mechanisms to speed up the run of IC3.}}, author = {{Isenberg, Tobias}}, booktitle = {{Proceedings of the 15th International Conference on Application of Concurrency to System Design (ACSD)}}, pages = {{1--9}}, title = {{{Incremental Inductive Verification of Parameterized Timed Systems}}}, doi = {{10.1109/ACSD.2015.13}}, year = {{2015}}, } @phdthesis{246, author = {{Besova, Galina}}, publisher = {{Universität Paderborn}}, title = {{{Systematic Development and Re-Use of Model Transformations}}}, year = {{2015}}, } @inproceedings{262, abstract = {{"Programs from Proofs" is a generic method which generates new programs out of correctness proofs of given programs. The technique ensures that the new and given program are behaviorally equivalent and that the new program is easily verifiable, thus serving as an alternative to proof-carrying code concepts. So far, this generic method has one instantiation that verifies type-state properties of programs. In this paper, we present a whole range of new instantiations, all based on dataflow analyses. More precisely, we show how an imprecise but fast dataflow analysis can be enhanced with a predicate analysis so as to yield a precise but expensive analysis. Out of the safety proofs of this analysis, we generate new programs, again behaviorally equivalent to the given ones, which are "easily verifiable" in the sense that now the dataflow analysis alone can yield precise results. An experimental evaluation practically supports our claim of easy verification.}}, author = {{Jakobs, Marie-Christine and Wehrheim, Heike}}, booktitle = {{Proceedings of the 30th Annual ACM Symposium on Applied Computing}}, pages = {{1729--1736}}, title = {{{Programs from Proofs of Predicated Dataflow Analyses}}}, doi = {{10.1145/2695664.2695690}}, year = {{2015}}, } @article{290, abstract = {{Model transformation is a key concept in model-driven software engineering. The definition of model transformations is usually based on meta-models describing the abstract syntax of languages.
While meta-models are thereby able to abstract from superfluous details of concrete syntax, they often lose structural information inherent in languages, like information on model elements always occurring together in particular shapes. As a consequence, model transformations cannot naturally re-use language structures, thus leading to unnecessary complexity in their development as well as in quality assurance. In this paper, we propose a new approach to model transformation development which makes it possible to simplify the developed transformations and to improve their quality by exploiting the languages' structures. The approach is based on context-free graph grammars and transformations defined by pairing productions of source and target grammars. We show that such transformations have important properties: they terminate and are sound, complete, and deterministic.}}, author = {{Besova, Galina and Steenken, Dominik and Wehrheim, Heike}}, journal = {{Computer Languages, Systems & Structures}}, pages = {{116--138}}, publisher = {{Elsevier}}, title = {{{Grammar-based model transformations: Definition, execution, and quality properties}}}, doi = {{10.1016/j.cl.2015.05.003}}, year = {{2015}}, }