@inproceedings{3159,
  author = {{Schellhorn, Gerhard and Travkin, Oleg and Wehrheim, Heike}},
  booktitle = {{Integrated Formal Methods - 12th International Conference, {IFM} 2016, Reykjavik, Iceland, June 1-5, 2016, Proceedings}},
  editor = {{Huisman, Marieke}},
  pages = {{193--209}},
  title = {{{Towards a Thread-Local Proof Technique for Starvation Freedom}}},
  doi = {{10.1007/978-3-319-33693-0_13}},
  year = {{2016}},
}

@inproceedings{3160,
  author = {{Doherty, Simon and Dongol, Brijesh and Derrick, John and Schellhorn, Gerhard and Wehrheim, Heike}},
  booktitle = {{20th International Conference on Principles of Distributed Systems, {OPODIS} 2016, December 13-16, 2016, Madrid, Spain}},
  editor = {{Fatourou, Panagiota and Jim{\'{e}}nez, Ernesto and Pedone, Fernando}},
  pages = {{35:1--35:17}},
  title = {{{Proving Opacity of a Pessimistic {STM}}}},
  doi = {{10.4230/LIPIcs.OPODIS.2016.35}},
  year = {{2016}},
}

@article{3161,
  author = {{Isenberg, Tobias and Jakobs, Marie{-}Christine and Pauck, Felix and Wehrheim, Heike}},
  journal = {{CoRR}},
  title = {{{Deriving approximation tolerance constraints from verification runs}}},
  year = {{2016}},
}

@article{175,
  abstract = {{Today, service compositions often need to be assembled or changed on-the-fly, which leaves little time for quality assurance. Moreover, quality assurance is complicated by service providers only giving information on their services in terms of domain-specific concepts with only limited semantic meaning. In this paper, we propose a method for constructing service compositions based on pre-verified templates. Templates, given as workflow descriptions, are typed over a (domain-independent) template ontology defining concepts and predicates. Their meaning is defined by an abstract semantics, leaving the specific meaning of ontology concepts open, however, only up to given ontology rules. Templates are proven correct using a Hoare-style proof calculus, extended by a specific rule for service calls. Construction of service compositions amounts to instantiation of templates with domain-specific services. Correctness of an instantiation can then simply be checked by verifying that the domain ontology (a) adheres to the rules of the template ontology, and (b) fulfills the constraints of the employed template.}},
  author = {{Walther, Sven and Wehrheim, Heike}},
  journal = {{Science of Computer Programming}},
  pages = {{2--23}},
  publisher = {{Elsevier}},
  title = {{{On-The-Fly Construction of Provably Correct Service Compositions - Templates and Proofs}}},
  doi = {{10.1016/j.scico.2016.04.002}},
  year = {{2016}},
}

@inproceedings{186,
  abstract = {{Software verification is an established method to ensure software safety. Nevertheless, verification still often fails, either because it consumes too many resources, e.g., time or memory, or because the technique is not mature enough to verify the property. The partial verification results are then often discarded, and the validation process proceeds with techniques like testing. To enable standard testing to profit from previous, partial verification, we use a summary of the verification effort to simplify the program for subsequent testing. Our techniques use this summary to construct a residual program which only contains program paths with unproven assertions.
Afterwards, the residual program can be used with standard testing tools. Our first experiments show that testing profits from the partial verification: the test effort is reduced, and combined verification and testing is faster than a complete verification.}},
  author = {{Czech, Mike and Jakobs, Marie-Christine and Wehrheim, Heike}},
  booktitle = {{Software Engineering 2016}},
  editor = {{Knoop, Jens and Zdun, Uwe}},
  pages = {{17--18}},
  title = {{{Just test what you cannot verify!}}},
  year = {{2016}},
}

@inproceedings{224,
  abstract = {{In modern software development, paradigms like component-based software engineering (CBSE) and service-oriented architectures (SOA) emphasize the construction of large software systems out of existing components or services. Therein, a service is a self-contained piece of software which adheres to a specified interface. In a model-based software design, this interface constitutes our sole knowledge of the service at design time, while service implementations are not available. Therefore, correctness checks or the detection of potential errors in service compositions have to be carried out without the possibility of executing services. This challenges the usage of standard software error localization techniques for service compositions. In this paper, we review state-of-the-art approaches for error localization of software and discuss their applicability to service compositions.}},
  author = {{Krämer, Julia and Wehrheim, Heike}},
  booktitle = {{Proceedings of the 5th European Conference on Service-Oriented and Cloud Computing (ESOCC 2016)}},
  pages = {{248--262}},
  title = {{{A short survey on using software error localization for service compositions}}},
  doi = {{10.1007/978-3-319-44482-6_16}},
  year = {{2016}},
}

@inproceedings{226,
  abstract = {{Error detection, localization and correction are time-intensive tasks in software development, but crucial to deliver functionally correct products. Thus, automated approaches to these tasks have been intensively studied for standard software systems. For model-based software systems, the situation is different. While error detection is still well-studied, error localization and correction is a less-studied domain. In this paper, we examine error localization and correction for models of service compositions. Based on formal definitions of error and correction in this context, we show that the classical approach to error localization and correction, i.e., first determining a set of suspicious statements and then proposing changes to these statements, is ineffective in our context. In fact, it lessens the chance to succeed in finding a correction at all. In this paper, we introduce correction proposals as a novel approach to error correction in service compositions, integrating error localization and correction in one combined step. In addition, we provide an algorithm to compute such correction proposals automatically.}},
  author = {{Krämer, Julia and Wehrheim, Heike}},
  booktitle = {{Proceedings of the 1st International Workshop on Formal to Practical Software Verification and Composition (VeryComp 2016)}},
  pages = {{445--457}},
  title = {{{A Formal Approach to Error Localization and Correction in Service Compositions}}},
  doi = {{10.1007/978-3-319-50230-4_35}},
  year = {{2016}},
}

@inproceedings{227,
  abstract = {{Information flow analysis studies the flow of data between program entities (e.g., variables), where the allowed flow is specified via security policies.
Typical information flow analyses compute a conservative (over-)approximation of the flows in a program. Such an analysis may thus signal non-existing violations of the security policy. In this paper, we propose a new technique for inspecting the reported violations (counterexamples) for spuriousness. Similar to counterexample-guided abstraction refinement (CEGAR) in software verification, we use the result of this inspection to improve the next round of the analysis. We prove soundness of this scheme.}},
  author = {{Töws, Manuel and Wehrheim, Heike}},
  booktitle = {{Proceedings of the 18th International Conference on Formal Engineering Methods (ICFEM 2016)}},
  pages = {{466--483}},
  title = {{{A CEGAR Scheme for Information Flow Analysis}}},
  doi = {{10.1007/978-3-319-47846-3_29}},
  year = {{2016}},
}

@inproceedings{170,
  abstract = {{We present PAndA2, an extendable static analysis tool for Android apps which examines permission-related security threats like overprivilege, permission redelegation, and permission flows. PAndA2 comes with a textual and graphical visualization of the analysis result and even supports the comparison of analysis results for different Android app versions.}},
  author = {{Jakobs, Marie-Christine and Töws, Manuel and Pauck, Felix}},
  booktitle = {{Workshop on Formal and Model-Driven Techniques for Developing Trustworthy Systems}},
  editor = {{Ishikawa, F. and Romanovsky, A. and Troubitsyna, E.}},
  title = {{{PAndA2: Analyzing Permission Use and Interplay in Android Apps (Tool Paper)}}},
  year = {{2016}},
}

@phdthesis{1190,
  author = {{Isenberg, Tobias}},
  publisher = {{Universität Paderborn}},
  title = {{{Induction-based Verification of Timed Systems}}},
  year = {{2016}},
}

@misc{162,
  author = {{Zhang, Guangli}},
  publisher = {{Universität Paderborn}},
  title = {{{Program Slicing: A Way of Separating WHILE Programs into Precise and Approximate Portions}}},
  year = {{2016}},
}

@misc{164,
  author = {{Czech, Mike}},
  publisher = {{Universität Paderborn}},
  title = {{{Predicting Rankings of Software Verification Tools Using Kernels for Structured Data}}},
  year = {{2016}},
}

@misc{133,
  author = {{Dewender, Markus}},
  publisher = {{Universität Paderborn}},
  title = {{{Verifikation von Service Kompositionen mit Spin}}},
  year = {{2016}},
}

@misc{134,
  author = {{Heinisch, Philipp}},
  publisher = {{Universität Paderborn}},
  title = {{{Verifikation von Service Kompositionen mit Prolog}}},
  year = {{2016}},
}

@inproceedings{250,
  abstract = {{Before execution, users should formally validate the correctness of software received from untrusted providers. To accelerate this validation, in the proof-carrying code (PCC) paradigm the provider delivers the software together with a certificate, a formal proof of the software's correctness. Thus, the user only checks if the attached certificate shows correctness of the delivered software. Recently, we introduced configurable program certification, a generic, PCC-based framework supporting various software analyses and safety properties. Evaluation of our framework revealed that validation suffers from certificate reading. In this paper, we present two orthogonal approaches which improve certificate validation, both reducing the impact of certificate reading. The first approach reduces the certificate size, storing information only if it cannot easily be recomputed. The second approach partitions the certificate into independently checkable parts.
The trick is to read parts of the certificate while parts already read are being checked. Our experiments show that validation benefits greatly from our improvements.}},
  author = {{Jakobs, Marie-Christine}},
  booktitle = {{Proceedings of the 13th International Conference on Software Engineering and Formal Methods (SEFM)}},
  pages = {{159--174}},
  title = {{{Speed Up Configurable Certificate Validation by Certificate Reduction and Partitioning}}},
  doi = {{10.1007/978-3-319-22969-0_12}},
  year = {{2015}},
}

@inproceedings{283,
  abstract = {{Today, software verification is an established analysis method which can provide high guarantees for software safety. However, the resources (time and/or memory) for an exhaustive verification are not always available, and the analysis then has to resort to other techniques, like testing. Most often, the already achieved partial verification results are discarded in this case, and testing has to start from scratch. In this paper, we propose a method for combining verification and testing in which testing only needs to check the residual fraction of an uncompleted verification. To this end, the partial results of a verification run are used to construct a residual program (and residual assertions to be checked on it). The residual program can afterwards be fed into standard testing tools. The proposed technique is sound modulo the soundness of the testing procedure. Experimental results show that this combined usage of verification and testing can significantly reduce the effort for the subsequent testing.}},
  author = {{Czech, Mike and Jakobs, Marie-Christine and Wehrheim, Heike}},
  booktitle = {{Fundamental Approaches to Software Engineering}},
  editor = {{Egyed, Alexander and Schaefer, Ina}},
  pages = {{100--114}},
  title = {{{Just test what you cannot verify!}}},
  doi = {{10.1007/978-3-662-46675-9_7}},
  year = {{2015}},
}

@inproceedings{285,
  abstract = {{We propose an incremental workflow for the verification of parameterized systems modeled as symmetric networks of timed automata. Starting with a small number of timed automata in the network, a safety property is verified using IC3, a state-of-the-art algorithm based on induction. The result of the verification, an inductive strengthening, is reused to propose a candidate inductive strengthening for a larger network. If the candidate is valid, our main theorem states that the safety property holds for all sizes of the network of timed automata. Otherwise, the number of automata is increased and the next iteration is started with a new run of IC3. We propose and thoroughly examine optimizations to our workflow, e.g., feedback mechanisms to speed up the run of IC3.}},
  author = {{Isenberg, Tobias}},
  booktitle = {{Proceedings of the 15th International Conference on Application of Concurrency to System Design (ACSD)}},
  pages = {{1--9}},
  title = {{{Incremental Inductive Verification of Parameterized Timed Systems}}},
  doi = {{10.1109/ACSD.2015.13}},
  year = {{2015}},
}

@phdthesis{246,
  author = {{Besova, Galina}},
  publisher = {{Universität Paderborn}},
  title = {{{Systematic Development and Re-Use of Model Transformations}}},
  year = {{2015}},
}

@inproceedings{262,
  abstract = {{"Programs from Proofs" is a generic method which generates new programs out of correctness proofs of given programs. The technique ensures that the new and given program are behaviorally equivalent and that the new program is easily verifiable, thus serving as an alternative to proof-carrying code concepts.
So far, this generic method has one instantiation that verifies type-state properties of programs. In this paper, we present a whole range of new instantiations, all based on data flow analyses. More precisely, we show how an imprecise but fast data flow analysis can be enhanced with a predicate analysis to yield a precise but expensive analysis. Out of the safety proofs of this analysis, we generate new programs, again behaviorally equivalent to the given ones, which are "easily verifiable" in the sense that now the data flow analysis alone can yield precise results. An experimental evaluation practically supports our claim of easy verification.}},
  author = {{Jakobs, Marie-Christine and Wehrheim, Heike}},
  booktitle = {{Proceedings of the 30th Annual ACM Symposium on Applied Computing}},
  pages = {{1729--1736}},
  title = {{{Programs from Proofs of Predicated Dataflow Analyses}}},
  doi = {{10.1145/2695664.2695690}},
  year = {{2015}},
}

@article{290,
  abstract = {{Model transformation is a key concept in model-driven software engineering. The definition of model transformations is usually based on meta-models describing the abstract syntax of languages. While meta-models are thereby able to abstract from superfluous details of concrete syntax, they often lose structural information inherent in languages, like information on model elements always occurring together in particular shapes. As a consequence, model transformations cannot naturally re-use language structures, thus leading to unnecessary complexity in their development as well as in quality assurance. In this paper, we propose a new approach to model transformation development which allows simplifying the developed transformations and improving their quality via the exploitation of the languages' structures. The approach is based on context-free graph grammars and transformations defined by pairing productions of source and target grammars. We show that such transformations have important properties: they terminate and are sound, complete, and deterministic.}},
  author = {{Besova, Galina and Steenken, Dominik and Wehrheim, Heike}},
  journal = {{Computer Languages, Systems \& Structures}},
  pages = {{116--138}},
  publisher = {{Elsevier}},
  title = {{{Grammar-based model transformations: Definition, execution, and quality properties}}},
  doi = {{10.1016/j.cl.2015.05.003}},
  year = {{2015}},
}