@article{49439,
  author    = {Piskachev, Goran and Becker, Matthias and Bodden, Eric},
  title     = {Can the Configuration of Static Analyses Make Resolving Security Vulnerabilities More Effective? -- {A} User Study},
  journal   = {Empirical Software Engineering},
  year      = {2023},
  volume    = {28},
  number    = {5},
  publisher = {Springer Science and Business Media LLC},
  issn      = {1382-3256},
  doi       = {10.1007/s10664-023-10354-3},
  keywords  = {Software},
  abstract  = {The use of static analysis security testing (SAST) tools has been increasing in recent years. However, previous studies have shown that, when shipped to end users such as development or security teams, the findings of these tools are often unsatisfying. Users report high numbers of false positives or long analysis times, making the tools unusable in the daily workflow. To address this, SAST tool creators provide a wide range of configuration options, such as customization of rules through domain-specific languages or specification of the application-specific analysis scope. In this paper, we study the configuration space of selected existing SAST tools when used within the integrated development environment (IDE). We focus on the configuration options that impact three dimensions, for which a trade-off is unavoidable, i.e., precision, recall, and analysis runtime. We perform a between-subjects user study with 40 users from multiple development and security teams - to our knowledge, the largest population for this kind of user study in the software engineering community. The results show that users who configure SAST tools are more effective in resolving security vulnerabilities detected by the tools than those using the default configuration. Based on post-study interviews, we identify common strategies that users have while configuring the SAST tools to provide further insights for tool creators. Finally, an evaluation of the configuration options of two commercial SAST tools, Fortify and CheckMarx, reveals that a quarter of the users do not understand the configuration options provided. The configuration options that are found most useful relate to the analysis scope.},
}

@article{27045,
  author   = {Luo, Linghui and Pauck, Felix and Piskachev, Goran and Benz, Manuel and Pashchenko, Ivan and Mory, Martin and Bodden, Eric and Hermann, Ben and Massacci, Fabio},
  title    = {{TaintBench}: Automatic Real-World Malware Benchmarking of {Android} Taint Analyses},
  journal  = {Empirical Software Engineering},
  year     = {2021},
  issn     = {1382-3256},
  doi      = {10.1007/s10664-021-10013-5},
  abstract = {Due to the lack of established real-world benchmark suites for static taint analyses of Android applications, evaluations of these analyses are often restricted and hard to compare. Even in evaluations that do use real-world apps, details about the ground truth in those apps are rarely documented, which makes it difficult to compare and reproduce the results. To push Android taint analysis research forward, this paper thus recommends criteria for constructing real-world benchmark suites for this specific domain, and presents TaintBench, the first real-world malware benchmark suite with documented taint flows. TaintBench benchmark apps include taint flows with complex structures, and addresses static challenges that are commonly agreed on by the community. Together with the TaintBench suite, we introduce the TaintBench framework, whose goal is to simplify real-world benchmarking of Android taint analyses. First, a usability test shows that the framework improves experts' performance and perceived usability when documenting and inspecting taint flows. Second, experiments using TaintBench reveal new insights for the taint analysis tools Amandroid and FlowDroid: (i) They are less effective on real-world malware apps than on synthetic benchmark apps. (ii) Predefined lists of sources and sinks heavily impact the tools' accuracy. (iii) Surprisingly, up-to-date versions of both tools are less accurate than their predecessors.},
}

