@inproceedings{6839, author = {{Doherty, Simon and Dongol, Brijesh and Wehrheim, Heike and Derrick, John}}, booktitle = {{32nd International Symposium on Distributed Computing, {DISC} 2018, New Orleans, LA, USA, October 15-19, 2018}}, pages = {{45:1--45:3}}, title = {{{Brief Announcement: Generalising Concurrent Correctness to Weak Memory}}}, doi = {{10.4230/LIPIcs.DISC.2018.45}}, year = {{2018}}, } @article{1043, abstract = {{Approximate computing (AC) is an emerging paradigm for energy-efficient computation. The basic idea of AC is to sacrifice high precision for low energy by allowing hardware to carry out “approximately correct” calculations. This provides a major challenge for software quality assurance: programs successfully verified to be correct might be erroneous on approximate hardware. In this letter, we present a novel approach for determining under what conditions a software verification result is valid for approximate hardware. To this end, we compute the allowed tolerances for AC hardware from successful verification runs. More precisely, we derive a set of constraints which—when met by the AC hardware—guarantees the verification result to carry over to AC. On the practical side, we furthermore: 1) show how to extract tolerances from verification runs employing predicate abstraction as verification technology and 2) show how to check such constraints on hardware designs. We have implemented all techniques, and exemplify them on example C programs and a number of recently proposed approximate adders.}}, author = {{Isenberg, Tobias and Jakobs, Marie-Christine and Pauck, Felix and Wehrheim, Heike}}, issn = {{1943-0663}}, journal = {{IEEE Embedded Systems Letters}}, pages = {{22--25}}, publisher = {{Institute of Electrical and Electronics Engineers (IEEE)}}, title = {{{Validity of Software Verification Results on Approximate Hardware}}}, doi = {{10.1109/LES.2017.2758200}}, year = {{2018}}, } @inproceedings{1096, abstract = {{to appear}}, author = {{Beyer, Dirk and Jakobs, Marie-Christine and Lemberger, Thomas and Wehrheim, Heike}}, booktitle = {{Proceedings of the 40th International Conference on Software Engineering (ICSE)}}, location = {{Gothenburg, Sweden}}, pages = {{1182--1193}}, publisher = {{ACM}}, title = {{{Reducer-Based Construction of Conditional Verifiers}}}, year = {{2018}}, } @misc{3512, author = {{Börding, Paul}}, publisher = {{Universität Paderborn}}, title = {{{Testing Java Method Contracts}}}, year = {{2017}}, } @inproceedings{3155, author = {{Töws, Manuel and Wehrheim, Heike}}, booktitle = {{Formal Methods and Software Engineering - 19th International Conference on Formal Engineering Methods, {ICFEM} 2017, Xi'an, China, November 13-17, 2017, Proceedings}}, editor = {{Duan, Zhenhua and Ong, Luke}}, pages = {{362--378}}, title = {{{Policy Dependent and Independent Information Flow Analyses}}}, doi = {{10.1007/978-3-319-68690-5_22}}, year = {{2017}}, } @inproceedings{3156, author = {{König, Jürgen and Wehrheim, Heike}}, booktitle = {{Theoretical Aspects of Computing - {ICTAC} 2017 - 14th International Colloquium, Hanoi, Vietnam, October 23-27, 2017, Proceedings}}, editor = {{Van Hung, Dang and Kapur, Deepak}}, pages = {{118--135}}, title = {{{Value-Based or Conflict-Based? Opacity Definitions for STMs}}}, doi = {{10.1007/978-3-319-67729-3_8}}, year = {{2017}}, } @inproceedings{114, abstract = {{Proof witnesses are proof artifacts showing correctness of programs wrt. safety properties. 
The recent past has seen a rising interest in witnesses as (a) proofs in a proof-carrying-code context, (b) certificates for the correct functioning of verification tools, or simply (c) exchange formats for (partial) verification results. As witnesses in all these scenarios need to be stored and processed, witnesses are required to be as small as possible. However, software verification tools – the prime suppliers of witnesses – do not necessarily construct small witnesses. In this paper, we present a formal account of proof witnesses. We introduce the concept of weakenings, reducing the complexity of proof witnesses while preserving the ability of witnessing safety. We develop a weakening technique for a specific class of program analyses, and prove it to be sound. Finally, we experimentally demonstrate our weakening technique to indeed achieve a size reduction of proof witnesses.}}, author = {{Jakobs, Marie-Christine and Wehrheim, Heike}}, booktitle = {{NASA Formal Methods: 9th International Symposium}}, editor = {{Barrett, Clark and Davies, Misty and Kahsai, Temesghen}}, pages = {{389--403}}, title = {{{Compact Proof Witnesses}}}, doi = {{10.1007/978-3-319-57288-8_28}}, year = {{2017}}, } @inproceedings{115, abstract = {{Whenever customers have to decide between different instances of the same product, they are interested in buying the best product. In contrast, companies are interested in reducing the construction effort (and usually, as a consequence thereof, the quality) to gain profit. The described setting is widely known as opposed preferences in quality of the product and also applies to the context of service-oriented computing. In general, service-oriented computing emphasizes the construction of large software systems out of existing services, where services are small and self-contained pieces of software that adhere to a specified interface. Several implementations of the same interface are considered as several instances of the same service. Customers are thus interested in buying the best service implementation for their service composition wrt. metrics such as costs, energy, memory consumption, or execution time. One way to ensure the service quality is to employ certificates, which can come in different kinds: Technical certificates proving correctness can be automatically constructed by the service provider and again be automatically checked by the user. Digital certificates allow proof of the integrity of a product. Other certificates might be rolled out if service providers follow a good software construction principle, which is checked in annual audits. Whereas all of these certificates are handled differently in service markets, what they have in common is that they influence the buying decisions of customers. In this paper, we review state-of-the-art developments in certification with respect to service-oriented computing. 
We not only discuss how certificates are constructed and handled in service-oriented computing but also review the effects of certificates on the market from an economic perspective.}}, author = {{Jakobs, Marie-Christine and Krämer, Julia and van Straaten, Dirk and Lettmann, Theodor}}, booktitle = {{The Ninth International Conference on Advanced Service Computing (SERVICE COMPUTATION)}}, editor = {{De Barros, Marcelo and Klink, Janusz and Uhl, Tadeus and Prinz, Thomas}}, pages = {{7--12}}, title = {{{Certification Matters for Service Markets}}}, year = {{2017}}, } @article{90, abstract = {{We propose and extend an approach for the verification of safety properties for parameterized timed systems modeled as networks of timed automata. For this task, we introduce an incremental workflow that is based on our algorithm IC3 with Zones. It proceeds in a cycle in which single models of the system are verified, and the verification results are employed for the reasoning about the entire system. Starting with the smallest instances, the verification of the safety property is carried out quickly and efficiently. On successful verification, the algorithm produces an inductive strengthening of the safety property. We reuse this result and try to reason about the entire parameterized timed system. To this end, we extrapolate the inductive strengthening into a candidate for the next-larger model. In case this candidate is a valid inductive strengthening for the next-larger model, our main theorem reasons about all models of the parameterized timed system, stating that the safety property holds true for all models. Otherwise, the main cycle starts over with the verification of the next-larger model. This workflow is iterated until we are able to reason about the entire parameterized timed system, until a counterexample trace is found, or until the single models become too large to be handled in the verification. We reuse the intermediate results in a feedback loop in order to accelerate the verification runs for the single models. Furthermore, we consider an extended formalism in comparison to our previous publications.}}, author = {{Isenberg, Tobias}}, journal = {{ACM Transactions on Embedded Computing Systems}}, number = {{2}}, pages = {{47:1--47:24}}, publisher = {{ACM}}, title = {{{Incremental Inductive Verification of Parameterized Timed Systems}}}, doi = {{10.1145/2984640}}, year = {{2017}}, } @inproceedings{5769, abstract = {{Information Flow Analysis (IFA) aims at detecting illegal flows of information between program entities. “Legality” is therein specified in terms of various security policies. For the analysis, this opens up two possibilities: building generic, policy-independent IFAs or building specific, policy-dependent IFAs. While the former needs to track all dependencies between program entities, the latter allows for a reduced and thus more efficient analysis. In this paper, we start out by formally defining a policy-independent information flow analysis. Next, we show how to specialize this IFA via policy-specific variable tracking, and prove soundness of the specialization. We furthermore investigate refinement relationships between policies, allowing an IFA for one policy to be employed for its refinements. 
As policy refinement depends on concrete program entities, we additionally propose a precomputation of policy refinement conditions, enabling an efficient refinement check for concrete programs.}}, author = {{Töws, Manuel and Wehrheim, Heike}}, booktitle = {{Formal Methods and Software Engineering - 19th International Conference on Formal Engineering Methods (ICFEM 2017)}}, isbn = {{9783319686899}}, issn = {{0302-9743}}, pages = {{362--378}}, publisher = {{Springer International Publishing}}, title = {{{Policy Dependent and Independent Information Flow Analyses}}}, doi = {{10.1007/978-3-319-68690-5_22}}, year = {{2017}}, } @phdthesis{707, author = {{Walther, Sven}}, publisher = {{Universität Paderborn}}, title = {{{Knowledge-based Verification of Service Compositions}}}, doi = {{10.17619/UNIPB/1-307}}, year = {{2017}}, } @inproceedings{71, abstract = {{Today, software verification tools have reached the maturity to be used for large-scale programs. Different tools perform differently well on varying code. A software developer is hence faced with the problem of choosing a tool appropriate for her program at hand. A ranking of tools on programs could facilitate the choice. Such rankings can, however, so far only be obtained by running all considered tools on the program. In this paper, we present a machine learning approach to predicting rankings of tools on programs. The method builds upon so-called label ranking algorithms, which we complement with appropriate kernels providing a similarity measure for programs. Our kernels employ a graph representation for software source code that mixes elements of control flow and program dependence graphs with abstract syntax trees. Using data sets from the software verification competition SV-COMP, we demonstrate our rank prediction technique to generalize well and achieve a rather high predictive accuracy (rank correlation > 0.6).}}, author = {{Czech, Mike and Hüllermeier, Eyke and Jakobs, Marie-Christine and Wehrheim, Heike}}, booktitle = {{Proceedings of the 3rd International Workshop on Software Analytics}}, pages = {{23--26}}, title = {{{Predicting Rankings of Software Verification Tools}}}, doi = {{10.1145/3121257.3121262}}, year = {{2017}}, } @techreport{72, abstract = {{Software verification competitions, such as the annual SV-COMP, evaluate software verification tools with respect to their effectiveness and efficiency. Typically, the outcome of a competition is a (possibly category-specific) ranking of the tools. For many applications, such as building portfolio solvers, it would be desirable to have an idea of the (relative) performance of verification tools on a given verification task beforehand, i.e., prior to actually running all tools on the task. In this paper, we present a machine learning approach to predicting rankings of tools on verification tasks. The method builds upon so-called label ranking algorithms, which we complement with appropriate kernels providing a similarity measure for verification tasks. Our kernels employ a graph representation for software source code that mixes elements of control flow and program dependence graphs with abstract syntax trees. Using data sets from SV-COMP, we demonstrate our rank prediction technique to generalize well and achieve a rather high predictive accuracy. In particular, our method outperforms a recently proposed feature-based approach of Demyanova et al. (when applied to rank predictions). 
}}, author = {{Czech, Mike and Hüllermeier, Eyke and Jakobs, Marie-Christine and Wehrheim, Heike}}, title = {{{Predicting Rankings of Software Verification Competitions}}}, year = {{2017}}, } @article{68, abstract = {{Proof-carrying hardware (PCH) is a principle for achieving safety for dynamically reconfigurable hardware systems. The producer of a hardware module spends considerable effort when creating a proof for a safety policy. The proof is then transferred as a certificate together with the configuration bitstream to the consumer of the hardware module, who can quickly verify the given proof. Previous work utilized SAT solvers and resolution traces to set up a PCH technology and corresponding tool flows. In this article, we present a novel technology for PCH based on inductive invariants. For sequential circuits, our approach is fundamentally stronger than the previous SAT-based one since we avoid the limitations of bounded unrolling. We contrast our technology to existing ones and show that it fits into previously proposed tool flows. We conduct experiments with four categories of benchmark circuits and report consumer and producer runtime and peak memory consumption, as well as the size of the certificates and the distribution of the workload between producer and consumer. Experiments clearly show that our new induction-based technology is superior for sequential circuits, whereas the previous SAT-based technology is the better choice for combinational circuits.}}, author = {{Isenberg, Tobias and Platzner, Marco and Wehrheim, Heike and Wiersema, Tobias}}, journal = {{ACM Transactions on Design Automation of Electronic Systems}}, number = {{4}}, pages = {{61:1--61:23}}, publisher = {{ACM}}, title = {{{Proof-Carrying Hardware via Inductive Invariants}}}, doi = {{10.1145/3054743}}, year = {{2017}}, } @phdthesis{685, author = {{Jakobs, Marie-Christine}}, publisher = {{Universität Paderborn}}, title = {{{On-The-Fly Safety Checking - Customizing Program Certification and Program Restructuring}}}, doi = {{10.17619/UNIPB/1-104}}, year = {{2017}}, } @article{69, abstract = {{Today, software is traded worldwide on global markets, with apps being downloaded to smartphones within minutes or seconds. This poses, more than ever, the challenge of ensuring safety of software in the face of (1) unknown or untrusted software providers together with (2) resource-limited software consumers. The concept of Proof-Carrying Code (PCC), suggested years ago by Necula, provides one framework for securing the execution of untrusted code. PCC techniques attach safety proofs, constructed by software producers, to code. Based on the assumption that checking proofs is usually much simpler than constructing proofs, software consumers should thus be able to quickly check the safety of software. However, PCC techniques often suffer from the size of certificates (i.e., the attached proofs), making PCC techniques inefficient in practice. In this article, we introduce a new framework for the safe execution of untrusted code called Programs from Proofs (PfP). The basic assumption underlying the PfP technique is the fact that the structure of programs significantly influences the complexity of checking a specific safety property. Instead of attaching proofs to program code, the PfP technique transforms the program into an efficiently checkable form, thus guaranteeing quick safety checks for software consumers. For this transformation, the technique also uses a producer-side automatic proof of safety. 
More specifically, safety proving for the software producer proceeds via the construction of an abstract reachability graph (ARG) unfolding the control-flow automaton (CFA) up to the degree necessary for simple checking. To this end, we combine different sorts of software analysis: expensive analyses incrementally determining the degree of unfolding, and cheap analyses responsible for safety checking. From the abstract reachability graph, we generate the new program. In its CFA structure, it is isomorphic to the graph, and hence another, this time consumer-side, cheap analysis can quickly determine its safety. Like PCC, Programs from Proofs is a general framework instantiable with different sorts of (expensive and cheap) analysis. Here, we present the general framework and exemplify it by some concrete examples. We have implemented different instantiations on top of the configurable program analysis tool CPAchecker and report on experiments, in particular on comparisons with PCC techniques.}}, author = {{Jakobs, Marie-Christine and Wehrheim, Heike}}, journal = {{ACM Transactions on Programming Languages and Systems}}, number = {{2}}, pages = {{7:1--7:56}}, publisher = {{ACM}}, title = {{{Programs from Proofs: A Framework for the Safe Execution of Untrusted Software}}}, doi = {{10.1145/3014427}}, year = {{2017}}, } @misc{109, author = {{Pauck, Felix}}, publisher = {{Universität Paderborn}}, title = {{{Cooperative static analysis of Android applications}}}, year = {{2017}}, } @misc{201, author = {{Bröcher, Henrik}}, publisher = {{Universität Paderborn}}, title = {{{Evaluation von Graphpartitionierungsalgorithmen im Kontext von Konfigurierbarer Softwarezertifizierung}}}, year = {{2016}}, } @inproceedings{3157, author = {{Beringer, Steffen and Wehrheim, Heike}}, booktitle = {{Critical Systems: Formal Methods and Automated Verification - Joint 21st International Workshop on Formal Methods for Industrial Critical Systems and 16th International Workshop on Automated Verification of Critical Systems, FMICS-AVoCS 2016, Pisa, Italy, September 26-28, 2016, Proceedings}}, editor = {{ter Beek, Maurice H. and Gnesi, Stefania and Knapp, Alexander}}, pages = {{189--204}}, title = {{{Verification of AUTOSAR Software Architectures with Timed Automata}}}, doi = {{10.1007/978-3-319-45943-1_13}}, year = {{2016}}, } @inproceedings{3158, author = {{Travkin, Oleg and Wehrheim, Heike}}, booktitle = {{Theoretical Aspects of Computing - {ICTAC} 2016 - 13th International Colloquium, Taipei, Taiwan, ROC, October 24-31, 2016, Proceedings}}, editor = {{Sampaio, Augusto and Wang, Farn}}, pages = {{3--24}}, title = {{{Verification of Concurrent Programs on Weak Memory Models}}}, doi = {{10.1007/978-3-319-46750-4_1}}, year = {{2016}}, }