@phdthesis{707,
  author       = {Walther, Sven},
  school       = {Universität Paderborn},
  title        = {Knowledge-based Verification of Service Compositions},
  doi          = {10.17619/UNIPB/1-307},
  year         = {2017},
}

@inproceedings{71,
  abstract     = {Today, software verification tools have reached the maturity to be used for large scale programs. Different tools perform differently well on varying code. A software developer is hence faced with the problem of choosing a tool appropriate for her program at hand. A ranking of tools on programs could facilitate the choice. Such rankings can, however, so far only be obtained by running all considered tools on the program. In this paper, we present a machine learning approach to predicting rankings of tools on programs. The method builds upon so-called label ranking algorithms, which we complement with appropriate kernels providing a similarity measure for programs. Our kernels employ a graph representation for software source code that mixes elements of control flow and program dependence graphs with abstract syntax trees. Using data sets from the software verification competition SV-COMP, we demonstrate our rank prediction technique to generalize well and achieve a rather high predictive accuracy (rank correlation {$>$} 0.6).},
  author       = {Czech, Mike and Hüllermeier, Eyke and Jakobs, Marie-Christine and Wehrheim, Heike},
  booktitle    = {Proceedings of the 3rd International Workshop on Software Analytics},
  pages        = {23--26},
  title        = {Predicting Rankings of Software Verification Tools},
  doi          = {10.1145/3121257.3121262},
  year         = {2017},
}

@techreport{72,
  abstract     = {Software verification competitions, such as the annual SV-COMP, evaluate software verification tools with respect to their effectivity and efficiency. Typically, the outcome of a competition is a (possibly category-specific) ranking of the tools. For many applications, such as building portfolio solvers, it would be desirable to have an idea of the (relative) performance of verification tools on a given verification task beforehand, i.e., prior to actually running all tools on the task. In this paper, we present a machine learning approach to predicting rankings of tools on verification tasks. The method builds upon so-called label ranking algorithms, which we complement with appropriate kernels providing a similarity measure for verification tasks. Our kernels employ a graph representation for software source code that mixes elements of control flow and program dependence graphs with abstract syntax trees. Using data sets from SV-COMP, we demonstrate our rank prediction technique to generalize well and achieve a rather high predictive accuracy. In particular, our method outperforms a recently proposed feature-based approach of Demyanova et al. (when applied to rank predictions).},
  author       = {Czech, Mike and Hüllermeier, Eyke and Jakobs, Marie-Christine and Wehrheim, Heike},
  title        = {Predicting Rankings of Software Verification Competitions},
  year         = {2017},
  internal-note = {NOTE(review): @techreport requires an "institution" field -- presumably Universität Paderborn, confirm and add},
}

@article{68,
  abstract     = {Proof-carrying hardware (PCH) is a principle for achieving safety for dynamically reconfigurable hardware systems. The producer of a hardware module spends huge effort when creating a proof for a safety policy. The proof is then transferred as a certificate together with the configuration bitstream to the consumer of the hardware module, who can quickly verify the given proof. Previous work utilized SAT solvers and resolution traces to set up a PCH technology and corresponding tool flows. In this article, we present a novel technology for PCH based on inductive invariants. For sequential circuits, our approach is fundamentally stronger than the previous SAT-based one since we avoid the limitations of bounded unrolling. We contrast our technology to existing ones and show that it fits into previously proposed tool flows. We conduct experiments with four categories of benchmark circuits and report consumer and producer runtime and peak memory consumption, as well as the size of the certificates and the distribution of the workload between producer and consumer. Experiments clearly show that our new induction-based technology is superior for sequential circuits, whereas the previous SAT-based technology is the better choice for combinational circuits.},
  author       = {Isenberg, Tobias and Platzner, Marco and Wehrheim, Heike and Wiersema, Tobias},
  journal      = {ACM Transactions on Design Automation of Electronic Systems},
  number       = {4},
  pages        = {61:1--61:23},
  publisher    = {ACM},
  title        = {Proof-Carrying Hardware via Inductive Invariants},
  doi          = {10.1145/3054743},
  year         = {2017},
}

@phdthesis{685,
  author       = {Jakobs, Marie-Christine},
  school       = {Universität Paderborn},
  title        = {On-The-Fly Safety Checking - Customizing Program Certification and Program Restructuring},
  doi          = {10.17619/UNIPB/1-104},
  year         = {2017},
}

@article{69,
  abstract     = {Today, software is traded worldwide on global markets, with apps being downloaded to smartphones within minutes or seconds. This poses, more than ever, the challenge of ensuring safety of software in the face of (1) unknown or untrusted software providers together with (2) resource-limited software consumers. The concept of Proof-Carrying Code (PCC), years ago suggested by Necula, provides one framework for securing the execution of untrusted code. PCC techniques attach safety proofs, constructed by software producers, to code. Based on the assumption that checking proofs is usually much simpler than constructing proofs, software consumers should thus be able to quickly check the safety of software. However, PCC techniques often suffer from the size of certificates (i.e., the attached proofs), making PCC techniques inefficient in practice. In this article, we introduce a new framework for the safe execution of untrusted code called Programs from Proofs (PfP). The basic assumption underlying the PfP technique is the fact that the structure of programs significantly influences the complexity of checking a specific safety property. Instead of attaching proofs to program code, the PfP technique transforms the program into an efficiently checkable form, thus guaranteeing quick safety checks for software consumers. For this transformation, the technique also uses a producer-side automatic proof of safety. More specifically, safety proving for the software producer proceeds via the construction of an abstract reachability graph (ARG) unfolding the control-flow automaton (CFA) up to the degree necessary for simple checking. To this end, we combine different sorts of software analysis: expensive analyses incrementally determining the degree of unfolding, and cheap analyses responsible for safety checking. Out of the abstract reachability graph we generate the new program. In its CFA structure, it is isomorphic to the graph and hence another, this time consumer-side, cheap analysis can quickly determine its safety. Like PCC, Programs from Proofs is a general framework instantiable with different sorts of (expensive and cheap) analysis. Here, we present the general framework and exemplify it by some concrete examples. We have implemented different instantiations on top of the configurable program analysis tool CPAchecker and report on experiments, in particular on comparisons with PCC techniques.},
  author       = {Jakobs, Marie-Christine and Wehrheim, Heike},
  journal      = {ACM Transactions on Programming Languages and Systems},
  number       = {2},
  pages        = {7:1--7:56},
  publisher    = {ACM},
  title        = {Programs from Proofs: A Framework for the Safe Execution of Untrusted Software},
  doi          = {10.1145/3014427},
  year         = {2017},
}

@misc{109,
  author       = {Pauck, Felix},
  howpublished = {Universität Paderborn},
  title        = {Cooperative Static Analysis of {Android} Applications},
  year         = {2017},
}

@misc{201,
  author       = {Bröcher, Henrik},
  howpublished = {Universität Paderborn},
  title        = {Evaluation von {Graphpartitionierungsalgorithmen} im {Kontext} von {Konfigurierbarer} {Softwarezertifizierung}},
  year         = {2016},
}

@inproceedings{3157,
  author       = {Beringer, Steffen and Wehrheim, Heike},
  booktitle    = {Critical Systems: Formal Methods and Automated Verification - Joint 21st International Workshop on Formal Methods for Industrial Critical Systems and 16th International Workshop on Automated Verification of Critical Systems, FMICS-AVoCS 2016, Pisa, Italy, September 26-28, 2016, Proceedings},
  editor       = {ter Beek, Maurice H. and Gnesi, Stefania and Knapp, Alexander},
  pages        = {189--204},
  title        = {Verification of {AUTOSAR} Software Architectures with Timed Automata},
  doi          = {10.1007/978-3-319-45943-1_13},
  year         = {2016},
}

@inproceedings{3158,
  author       = {Travkin, Oleg and Wehrheim, Heike},
  booktitle    = {Theoretical Aspects of Computing - {ICTAC} 2016 - 13th International Colloquium, Taipei, Taiwan, ROC, October 24-31, 2016, Proceedings},
  editor       = {Sampaio, Augusto and Wang, Farn},
  pages        = {3--24},
  title        = {Verification of Concurrent Programs on Weak Memory Models},
  doi          = {10.1007/978-3-319-46750-4_1},
  year         = {2016},
}

@inproceedings{3159,
  author       = {Schellhorn, Gerhard and Travkin, Oleg and Wehrheim, Heike},
  booktitle    = {Integrated Formal Methods - 12th International Conference, {IFM} 2016, Reykjavik, Iceland, June 1-5, 2016, Proceedings},
  editor       = {Huisman, Marieke},
  pages        = {193--209},
  title        = {Towards a Thread-Local Proof Technique for Starvation Freedom},
  doi          = {10.1007/978-3-319-33693-0_13},
  year         = {2016},
}

@inproceedings{3160,
  author       = {Doherty, Simon and Dongol, Brijesh and Derrick, John and Schellhorn, Gerhard and Wehrheim, Heike},
  booktitle    = {20th International Conference on Principles of Distributed Systems, {OPODIS} 2016, December 13-16, 2016, Madrid, Spain},
  editor       = {Fatourou, Panagiota and Jim{\'e}nez, Ernesto and Pedone, Fernando},
  pages        = {35:1--35:17},
  title        = {Proving Opacity of a Pessimistic {STM}},
  doi          = {10.4230/LIPIcs.OPODIS.2016.35},
  year         = {2016},
}

@article{3161,
  author       = {Isenberg, Tobias and Jakobs, Marie-Christine and Pauck, Felix and Wehrheim, Heike},
  journal      = {CoRR},
  title        = {Deriving Approximation Tolerance Constraints from Verification Runs},
  year         = {2016},
}

@article{175,
  abstract     = {Today, service compositions often need to be assembled or changed on-the-fly, which leaves only little time for quality assurance. Moreover, quality assurance is complicated by service providers only giving information on their services in terms of domain specific concepts with only limited semantic meaning. In this paper, we propose a method for constructing service compositions based on pre-verified templates. Templates, given as workflow descriptions, are typed over a (domain-independent) template ontology defining concepts and predicates. Their meaning is defined by an abstract semantics, leaving the specific meaning of ontology concepts open, however, only up to given ontology rules. Templates are proven correct using a Hoare-style proof calculus, extended by a specific rule for service calls. Construction of service compositions amounts to instantiation of templates with domain-specific services. Correctness of an instantiation can then simply be checked by verifying that the domain ontology (a) adheres to the rules of the template ontology, and (b) fulfills the constraints of the employed template.},
  author       = {Walther, Sven and Wehrheim, Heike},
  journal      = {Science of Computer Programming},
  pages        = {2--23},
  publisher    = {Elsevier},
  title        = {On-The-Fly Construction of Provably Correct Service Compositions - Templates and Proofs},
  doi          = {10.1016/j.scico.2016.04.002},
  year         = {2016},
}

@inproceedings{186,
  abstract     = {Software verification is an established method to ensure software safety. Nevertheless, verification still often fails, either because it consumes too much resources, e.g., time or memory, or the technique is not mature enough to verify the property. Often then discarding the partial verification, the validation process proceeds with techniques like testing. To enable standard testing to profit from previous, partial verification, we use a summary of the verification effort to simplify the program for subsequent testing. Our techniques use this summary to construct a residual program which only contains program paths with unproven assertions. Afterwards, the residual program can be used with standard testing tools. Our first experiments show that testing profits from the partial verification. The test effort is reduced and combined verification and testing is faster than a complete verification.},
  author       = {Czech, Mike and Jakobs, Marie-Christine and Wehrheim, Heike},
  booktitle    = {Software Engineering 2016},
  editor       = {Knoop, Jens and Zdun, Uwe},
  pages        = {17--18},
  title        = {Just Test What You Cannot Verify!},
  year         = {2016},
}

@inproceedings{224,
  abstract     = {In modern software development, paradigms like component-based software engineering (CBSE) and service-oriented architectures (SOA) emphasize the construction of large software systems out of existing components or services. Therein, a service is a self-contained piece of software, which adheres to a specified interface. In a model-based software design, this interface constitutes our sole knowledge of the service at design time, while service implementations are not available. Therefore, correctness checks or detection of potential errors in service compositions has to be carried out without the possibility of executing services. This challenges the usage of standard software error localization techniques for service compositions. In this paper, we review state-of-the-art approaches for error localization of software and discuss their applicability to service compositions.},
  author       = {Krämer, Julia and Wehrheim, Heike},
  booktitle    = {Proceedings of the 5th European Conference on Service-Oriented and Cloud Computing (ESOCC 2016)},
  pages        = {248--262},
  title        = {A Short Survey on Using Software Error Localization for Service Compositions},
  doi          = {10.1007/978-3-319-44482-6_16},
  year         = {2016},
}

@inproceedings{226,
  abstract     = {Error detection, localization and correction are time-intensive tasks in software development, but crucial to deliver functionally correct products. Thus, automated approaches to these tasks have been intensively studied for standard software systems. For model-based software systems, the situation is different. While error detection is still well-studied, error localization and correction is a less-studied domain. In this paper, we examine error localization and correction for models of service compositions. Based on formal definitions of error and correction in this context, we show that the classical approach of error localization and correction, i.e. first determining a set of suspicious statements and then proposing changes to these statements, is ineffective in our context. In fact, it lessens the chance to succeed in finding a correction at all. In this paper, we introduce correction proposal as a novel approach on error correction in service compositions integrating error localization and correction in one combined step. In addition, we provide an algorithm to compute such correction proposals automatically.},
  author       = {Krämer, Julia and Wehrheim, Heike},
  booktitle    = {Proceedings of the 1st International Workshop on Formal to Practical Software Verification and Composition (VeryComp 2016)},
  pages        = {445--457},
  title        = {A Formal Approach to Error Localization and Correction in Service Compositions},
  doi          = {10.1007/978-3-319-50230-4_35},
  year         = {2016},
}

@inproceedings{227,
  abstract     = {Information flow analysis studies the flow of data between program entities (e.g. variables), where the allowed flow is specified via security policies. Typical information flow analyses compute a conservative (over-)approximation of the flows in a program. Such an analysis may thus signal non-existing violations of the security policy. In this paper, we propose a new technique for inspecting the reported violations (counterexamples) for spuriousity. Similar to counterexample-guided-abstraction-refinement (CEGAR) in software verification, we use the result of this inspection to improve the next round of the analysis. We prove soundness of this scheme.},
  author       = {Töws, Manuel and Wehrheim, Heike},
  booktitle    = {Proceedings of the 18th International Conference on Formal Engineering Methods (ICFEM 2016)},
  pages        = {466--483},
  title        = {A {CEGAR} Scheme for Information Flow Analysis},
  doi          = {10.1007/978-3-319-47846-3_29},
  year         = {2016},
}

@inproceedings{170,
  abstract     = {We present PAndA2, an extendable, static analysis tool for Android apps which examines permission related security threats like overprivilege, existence of permission redelegation and permission flows. PAndA2 comes along with a textual and graphical visualization of the analysis result and even supports the comparison of analysis results for different android app versions.},
  author       = {Jakobs, Marie-Christine and Töws, Manuel and Pauck, Felix},
  booktitle    = {Workshop on Formal and Model-Driven Techniques for Developing Trustworthy Systems},
  editor       = {Ishikawa, Fuyuki and Romanovsky, Alexander and Troubitsyna, Elena},
  title        = {{PAndA} 2: Analyzing Permission Use and Interplay in {Android} Apps (Tool Paper)},
  year         = {2016},
}

@phdthesis{1190,
  author       = {Isenberg, Tobias},
  school       = {Universität Paderborn},
  title        = {Induction-based Verification of Timed Systems},
  year         = {2016},
}

