@inproceedings{35426,
  author    = {Richter, Cedric and Haltermann, Jan Frederik and Jakobs, Marie-Christine and Pauck, Felix and Schott, Stefan and Wehrheim, Heike},
  booktitle = {37th IEEE/ACM International Conference on Automated Software Engineering},
  publisher = {ACM},
  title     = {{Are Neural Bug Detectors Comparable to Software Developers on Variable Misuse Bugs?}},
  doi       = {10.1145/3551349.3561156},
  year      = {2023},
}
@inproceedings{36848,
  author    = {Schott, Stefan and Pauck, Felix},
  booktitle = {2022 IEEE 22nd International Working Conference on Source Code Analysis and Manipulation (SCAM)},
  publisher = {IEEE},
  title     = {{Benchmark Fuzzing for Android Taint Analyses}},
  doi       = {10.1109/scam55253.2022.00007},
  year      = {2022},
}
@inproceedings{35427,
  author    = {Pauck, Felix},
  booktitle = {37th IEEE/ACM International Conference on Automated Software Engineering},
  publisher = {ACM},
  title     = {{Scaling Arbitrary Android App Analyses}},
  doi       = {10.1145/3551349.3561339},
  year      = {2023},
}
@phdthesis{43108,
  author = {Pauck, Felix},
  school = {Paderborn University},
  title  = {{Cooperative Android App Analysis}},
  doi    = {10.17619/UNIPB/1-1698},
  year   = {2023},
}
@phdthesis{47833,
  author = {König, Jürgen},
  title  = {{On the Membership and Correctness Problem for State Serializability and Value Opacity}},
  year   = {2023},
}
@inproceedings{32590,
  author    = {Richter, Cedric and Wehrheim, Heike},
  booktitle = {2022 IEEE Conference on Software Testing, Verification and Validation (ICST)},
  pages     = {162--173},
  title     = {{Learning Realistic Mutations: Bug Creation for Neural Bug Detectors}},
  doi       = {10.1109/ICST53961.2022.00027},
  year      = {2022},
}
@inproceedings{32591,
  author    = {Richter, Cedric and Wehrheim, Heike},
  booktitle = {2022 IEEE/ACM 19th International Conference on Mining Software Repositories (MSR)},
  pages     = {418--422},
  title     = {{TSSB-3M: Mining single statement bugs at massive scale}},
  doi       = {10.1145/3524842.3528505},
  year      = {2022},
}
@inproceedings{45248,
  author    = {Dongol, Brijesh and Schellhorn, Gerhard and Wehrheim, Heike},
  booktitle = {33rd International Conference on Concurrency Theory, CONCUR 2022, September 12-16, 2022, Warsaw, Poland},
  editor    = {Klin, Bartek and Lasota, Slawomir and Muscholl, Anca},
  pages     = {31:1--31:23},
  publisher = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik},
  title     = {{Weak Progressive Forward Simulation Is Necessary and Sufficient for Strong Observational Refinement}},
  doi       = {10.4230/LIPIcs.CONCUR.2022.31},
  volume    = {243},
  year      = {2022},
}
@inproceedings{28350,
  abstract  = {In recent years, we observe an increasing amount of software with machine learning components being deployed. This poses the question of quality assurance for such components: how can we validate whether specified requirements are fulfilled by a machine learned software? Current testing and verification approaches either focus on a single requirement (e.g., fairness) or specialize on a single type of machine learning model (e.g., neural networks). In this paper, we propose property-driven testing of machine learning models. Our approach MLCheck encompasses (1) a language for property specification, and (2) a technique for systematic test case generation. The specification language is comparable to property-based testing languages. Test case generation employs advanced verification technology for a systematic, property dependent construction of test suites, without additional user supplied generator functions. We evaluate MLCheck using requirements and data sets from three different application areas (software discrimination, learning on knowledge graphs and security). Our evaluation shows that despite its generality MLCheck can even outperform specialised testing approaches while having a comparable runtime.},
  author    = {Sharma, Arnab and Demir, Caglar and Ngonga Ngomo, Axel-Cyrille and Wehrheim, Heike},
  booktitle = {Proceedings of the 20th IEEE International Conference on Machine Learning and Applications (ICMLA)},
  publisher = {IEEE},
  title     = {{MLCHECK–Property-Driven Testing of Machine Learning Classifiers}},
  year      = {2021},
}
@article{27045,
  abstract  = {Due to the lack of established real-world benchmark suites for static taint analyses of Android applications, evaluations of these analyses are often restricted and hard to compare. Even in evaluations that do use real-world apps, details about the ground truth in those apps are rarely documented, which makes it difficult to compare and reproduce the results. To push Android taint analysis research forward, this paper thus recommends criteria for constructing real-world benchmark suites for this specific domain, and presents TaintBench, the first real-world malware benchmark suite with documented taint flows. TaintBench benchmark apps include taint flows with complex structures, and addresses static challenges that are commonly agreed on by the community. Together with the TaintBench suite, we introduce the TaintBench framework, whose goal is to simplify real-world benchmarking of Android taint analyses. First, a usability test shows that the framework improves experts’ performance and perceived usability when documenting and inspecting taint flows. Second, experiments using TaintBench reveal new insights for the taint analysis tools Amandroid and FlowDroid: (i) They are less effective on real-world malware apps than on synthetic benchmark apps. (ii) Predefined lists of sources and sinks heavily impact the tools’ accuracy. (iii) Surprisingly, up-to-date versions of both tools are less accurate than their predecessors.},
  author    = {Luo, Linghui and Pauck, Felix and Piskachev, Goran and Benz, Manuel and Pashchenko, Ivan and Mory, Martin and Bodden, Eric and Hermann, Ben and Massacci, Fabio},
  issn      = {1382-3256},
  journal   = {Empirical Software Engineering},
  title     = {{TaintBench: Automatic real-world malware benchmarking of Android taint analyses}},
  doi       = {10.1007/s10664-021-10013-5},
  year      = {2021},
}
@misc{22304,
  author = {Schott, Stefan},
  title  = {{Android App Analysis Benchmark Case Generation}},
  year   = {2021},
}
@inproceedings{28199,
  author    = {Pauck, Felix and Wehrheim, Heike},
  booktitle = {2021 IEEE 21st International Working Conference on Source Code Analysis and Manipulation (SCAM)},
  title     = {{Jicer: Simplifying Cooperative Android App Analysis Tasks}},
  doi       = {10.1109/scam52516.2021.00031},
  year      = {2021},
}
@inproceedings{21238,
  author    = {Pauck, Felix and Wehrheim, Heike},
  booktitle = {Software Engineering 2021},
  editor    = {Koziolek, Anne and Schaefer, Ina and Seidl, Christoph},
  pages     = {83--84},
  publisher = {Gesellschaft für Informatik e.V.},
  title     = {{Cooperative Android App Analysis with CoDiDroid}},
  doi       = {10.18420/SE2021_30},
  year      = {2021},
}
@inproceedings{19656,
  author    = {Sharma, Arnab and Wehrheim, Heike},
  booktitle = {Proceedings of the 32nd IFIP International Conference on Testing Software and Systems (ICTSS)},
  publisher = {Springer},
  title     = {{Automatic Fairness Testing of Machine Learning Models}},
  year      = {2020},
}
@misc{19999,
  author    = {Mayer, Stefan},
  publisher = {Universität Paderborn},
  title     = {{Optimierung von JMCTest beim Testen von Inter Method Contracts}},
  year      = {2020},
}
@inproceedings{20274,
  author    = {Bila, Eleni and Doherty, Simon and Dongol, Brijesh and Derrick, John and Schellhorn, Gerhard and Wehrheim, Heike},
  booktitle = {Formal Techniques for Distributed Objects, Components, and Systems - 40th {IFIP} {WG} 6.1 International Conference, {FORTE} 2020, Held as Part of the 15th International Federated Conference on Distributed Computing Techniques, DisCoTec 2020, Valletta, Malta, June 15-19, 2020, Proceedings},
  editor    = {Gotsman, Alexey and Sokolova, Ana},
  pages     = {39--58},
  publisher = {Springer},
  title     = {{Defining and Verifying Durable Opacity: Correctness for Persistent Software Transactional Memory}},
  doi       = {10.1007/978-3-030-50086-3_3},
  volume    = {12136},
  year      = {2020},
}
@inproceedings{20275,
  author    = {Beringer, Steffen and Wehrheim, Heike},
  booktitle = {Proceedings of the 15th International Conference on Software Technologies, {ICSOFT} 2020, Lieusaint, Paris, France, July 7-9, 2020},
  editor    = {van Sinderen, Marten and Fill, Hans{-}Georg and Maciaszek, Leszek A.},
  pages     = {15--26},
  publisher = {ScitePress},
  title     = {{Consistency Analysis of AUTOSAR Timing Requirements}},
  doi       = {10.5220/0009766600150026},
  year      = {2020},
}
@inproceedings{20276,
  author    = {Beyer, Dirk and Wehrheim, Heike},
  booktitle = {Leveraging Applications of Formal Methods, Verification and Validation: Verification Principles - 9th International Symposium on Leveraging Applications of Formal Methods, ISoLA 2020, Rhodes, Greece, October 20-30, 2020, Proceedings, Part {I}},
  editor    = {Margaria, Tiziana and Steffen, Bernhard},
  pages     = {143--167},
  publisher = {Springer},
  title     = {{Verification Artifacts in Cooperative Verification: Survey and Unifying Component Framework}},
  doi       = {10.1007/978-3-030-61362-4_8},
  volume    = {12476},
  year      = {2020},
}
@proceedings{20277,
  editor    = {Wehrheim, Heike and Cabot, Jordi},
  isbn      = {978-3-030-45233-9},
  publisher = {Springer},
  title     = {{Fundamental Approaches to Software Engineering - 23rd International Conference, FASE 2020, Held as Part of the European Joint Conferences on Theory and Practice of Software, ETAPS 2020, Dublin, Ireland, April 25-30, 2020, Proceedings}},
  doi       = {10.1007/978-3-030-45234-6},
  volume    = {12076},
  year      = {2020},
}
@proceedings{20278,
  editor    = {Ahrendt, Wolfgang and Wehrheim, Heike},
  isbn      = {978-3-030-50994-1},
  publisher = {Springer},
  title     = {{Tests and Proofs - 14th International Conference, TAP@STAF 2020, Bergen, Norway, June 22-23, 2020, Proceedings [postponed]}},
  doi       = {10.1007/978-3-030-50995-8},
  volume    = {12165},
  year      = {2020},
}