@comment{ Reformatted: one field per line, single-brace field delimiters.
  The previous double braces around author/editor lists made BibTeX parse
  each whole list as a single corporate name (breaking sorting and labels);
  triple-braced titles defeated style casing. Page ranges normalised to "--".
  Citation keys are unchanged. Extraction artifacts in abstracts (lost
  ligatures, fused words) repaired. }

@inproceedings{408,
  author    = {Jakobs, Marie-Christine and Platzner, Marco and Wiersema, Tobias and Wehrheim, Heike},
  title     = {Integrating Software and Hardware Verification},
  booktitle = {Proceedings of the 11th International Conference on Integrated Formal Methods ({iFM})},
  editor    = {Albert, Elvira and Sekerinski, Emil},
  pages     = {307--322},
  doi       = {10.1007/978-3-319-10181-1_19},
  year      = {2014},
  abstract  = {Verification of hardware and software usually proceeds separately, software analysis relying on the correctness of processors executing instructions. This assumption is valid as long as the software runs on standard CPUs that have been extensively validated and are in wide use. However, for processors exploiting custom instruction set extensions to meet performance and energy constraints the validation might be less extensive, challenging the correctness assumption. In this paper we present an approach for integrating software analyses with hardware verification, specifically targeting custom instruction set extensions. We propose three different techniques for deriving the properties to be proven for the hardware implementation of a custom instruction in order to support software analyses. The techniques are designed to explore the trade-off between generality and efficiency and span from proving functional equivalence over checking the rules of a particular analysis domain to verifying actual pre and post conditions resulting from program analysis. We demonstrate and compare the three techniques on example programs with custom instructions, using state-of-the-art software and hardware verification techniques.},
}

@inproceedings{417,
  author    = {Besova, Galina and Steenken, Dominik and Wehrheim, Heike},
  title     = {Grammar-based model transformations},
  booktitle = {Proceedings 3rd Workshop on Model Driven Approaches in System Development ({MDASD})},
  pages     = {1601--1610},
  doi       = {10.1016/j.cl.2015.05.003},
  year      = {2014},
  internal-note = {NOTE(review): author corrected from "Steenke" to "Steenken" for consistency with entry 3176; the DOI appears to belong to the extended 2015 journal version -- verify against the workshop proceedings},
  abstract  = {Model transformation is a key concept in model-driven software engineering. The definition of model transformations is usually based on meta-models describing the abstract syntax of languages. While meta-models are thereby able to abstract from superfluous details of concrete syntax, they often lose structural information inherent in languages, like information on model elements always occurring together in particular shapes. As a consequence, model transformations cannot naturally re-use language structures, thus leading to unnecessary complexity in their development as well as analysis. In this paper, we propose a new approach to model transformation development which allows to simplify and improve the quality of the developed transformations via the exploitation of the languages’ structures. The approach is based on context-free grammars and transformations defined by pairing productions of source and target grammars. We show that such transformations exhibit three important characteristics: they are sound, complete and deterministic.},
}

@misc{418,
  author    = {Pauck, Felix},
  title     = {Generierung von Eigenschaftspr{\"u}fern in einem Hardware/Software-Co-Verifikationsverfahren},
  publisher = {Universit{\"a}t Paderborn},
  year      = {2014},
  internal-note = {NOTE(review): presumably a student thesis at Paderborn -- if so, @mastersthesis/@thesis with a school field would be the better type; verify},
}

@inproceedings{3176,
  author    = {Isenberg, Tobias and Steenken, Dominik and Wehrheim, Heike},
  title     = {Bounded Model Checking of Graph Transformation Systems via {SMT} Solving},
  booktitle = {Formal Techniques for Distributed Systems - Joint {IFIP} {WG} 6.1 International Conference, {FMOODS/FORTE} 2013, Held as Part of the 8th International Federated Conference on Distributed Computing Techniques, DisCoTec 2013, Florence, Italy, June 3-5, 2013. Proceedings},
  editor    = {Beyer, Dirk and Boreale, Michele},
  pages     = {178--192},
  doi       = {10.1007/978-3-642-38592-6_13},
  year      = {2013},
}

@inproceedings{3177,
  author    = {Travkin, Oleg and M{\"u}tze, Annika and Wehrheim, Heike},
  title     = {{SPIN} as a Linearizability Checker under Weak Memory Models},
  booktitle = {Hardware and Software: Verification and Testing - 9th International Haifa Verification Conference, {HVC} 2013, Haifa, Israel, November 5-7, 2013, Proceedings},
  editor    = {Bertacco, Valeria and Legay, Axel},
  pages     = {311--326},
  doi       = {10.1007/978-3-319-03077-7_21},
  year      = {2013},
}

@inproceedings{3178,
  author    = {Dongol, Brijesh and Travkin, Oleg and Derrick, John and Wehrheim, Heike},
  title     = {A High-Level Semantics for Program Execution under Total Store Order Memory},
  booktitle = {Theoretical Aspects of Computing - {ICTAC} 2013 - 10th International Colloquium, Shanghai, China, September 4-6, 2013. Proceedings},
  editor    = {Liu, Zhiming and Woodcock, Jim and Zhu, Huibiao},
  pages     = {177--194},
  doi       = {10.1007/978-3-642-39718-9_11},
  year      = {2013},
}

@inproceedings{3179,
  author    = {Ziegert, Steffen and Wehrheim, Heike},
  title     = {Temporal Reconfiguration Plans for Self-Adaptive Systems},
  booktitle = {Software Engineering 2013: Fachtagung des GI-Fachbereichs Softwaretechnik, 26. Februar - 2. M{\"a}rz 2013 in Aachen},
  editor    = {Kowalewski, Stefan and Rumpe, Bernhard},
  pages     = {271--284},
  year      = {2013},
}

@inproceedings{469,
  author    = {Wonisch, Daniel and Schremmer, Alexander and Wehrheim, Heike},
  title     = {Zero Overhead Runtime Monitoring},
  booktitle = {Proceedings of the 11th International Conference on Software Engineering and Formal Methods ({SEFM})},
  pages     = {244--258},
  doi       = {10.1007/978-3-642-40561-7_17},
  year      = {2013},
  abstract  = {Runtime monitoring aims at ensuring program safety by monitoring the program's behaviour during execution and taking appropriate action before a program violates some property. Runtime monitoring is in particular important when an exhaustive formal verification fails. While the approach allows for a safe execution of programs, it may impose a significant runtime overhead. In this paper, we propose a novel technique combining verification and monitoring which incurs no overhead during runtime at all. The technique proceeds by using the inconclusive result of a verification run as the basis for transforming the program into one where all potential points of failure are replaced by HALT statements. The new program is safe by construction, behaviourally equivalent to the original program (except for unsafe behaviour), and has the same performance characteristics.},
}

@phdthesis{478,
  author   = {Timm, Nils},
  title    = {Three-Valued Abstraction and Heuristic-Guided Refinement for Verifying Concurrent Systems},
  school   = {Universit{\"a}t Paderborn},
  year     = {2013},
  abstract = {Software systems are playing an increasing role in our everyday life, and as the amount of software applications grows, so does their complexity and the relevance of their computations. Software components can be found in many systems that are charged with safety-critical tasks, such as control systems for aviation or power plants. Hence, software verification techniques that are capable of proving the absence of critical errors are becoming more and more important in the field of software engineering. A well-established approach to software verification is model checking. Applying this technique involves an exhaustive exploration of a state space model corresponding to the system under consideration. The major challenge in model checking is the so-called state explosion problem: The state space of a software system grows exponentially with its size. Thus, the straightforward modelling of real-life systems is practically impossible. A common approach to this problem is the application of abstraction techniques, which reduce the original state space by mapping it on a significantly smaller abstract one. Abstraction inherently involves a loss of information, and thus, the resulting abstract model may be too imprecise for a definite result in verification. Therefore, abstraction is typically combined with abstraction refinement: An initially very coarse abstract model is iteratively refined, i.e. enriched with new details about the original system, until a level of abstraction is reached that is precise enough for a definite outcome. Abstraction refinement-based model checking is fully automatable and it is considered as one of the most promising approaches to the state explosion problem in verification. However, it is still faced with a number of challenges. There exist several types of abstraction techniques and not every type is equally well-suited for all kinds of systems and verification tasks. Moreover, the selection of adequate refinement steps is nontrivial and typically the most crucial part of the overall approach: Unfavourable refinement decisions can compromise the state space-reducing effect of abstraction, and as a consequence, can easily lead to the failure of verification. It is, however, hard to predict which refinement steps will eventually be expedient for verification – and which not.},
}

@inproceedings{498,
  author    = {Wonisch, Daniel and Schremmer, Alexander and Wehrheim, Heike},
  title     = {Programs from Proofs -- A {PCC} Alternative},
  booktitle = {Proceedings of the 25th International Conference on Computer Aided Verification ({CAV})},
  pages     = {912--927},
  doi       = {10.1007/978-3-642-39799-8_65},
  year      = {2013},
  abstract  = {Proof-carrying code approaches aim at safe execution of untrusted code by having the code producer attach a safety proof to the code which the code consumer only has to validate. Depending on the type of safety property, proofs can however become quite large and their validation - though faster than their construction - still time consuming. In this paper we introduce a new concept for safe execution of untrusted code. It keeps the idea of putting the time consuming part of proving on the side of the code producer, however, attaches no proofs to code anymore but instead uses the proof to transform the program into an equivalent but more efficiently verifiable program. Code consumers thus still do proving themselves, however, on a computationally inexpensive level only. Experimental results show that the proof effort can be reduced by several orders of magnitude, both with respect to time and space.},
}

@inproceedings{517,
  author    = {Walther, Sven and Wehrheim, Heike},
  title     = {Knowledge-Based Verification of Service Compositions - An {SMT} approach},
  booktitle = {Proceedings of the 18th IEEE International Conference on Engineering of Complex Computer Systems ({ICECCS})},
  pages     = {24--32},
  doi       = {10.1109/ICECCS.2013.14},
  year      = {2013},
  abstract  = {In the Semantic (Web) Services area, services are considered black boxes with a semantic description of their interfaces as to allow for precise service selection and configuration. The semantic description is usually grounded on domain-specific concepts as modeled in ontologies. This accounts for types used in service signatures, but also predicates occurring in preconditions and effects of services. Ontologies, in particular those enhanced with rules, capture the knowledge of domain experts on properties of and relations between domain concepts. In this paper, we present a verification technique for service compositions which makes use of this domain knowledge. We consider a service composition to be an assembly of services of which we just know signatures, preconditions, and effects. We aim at proving that a composition satisfies a (user-defined) requirement, specified in terms of guaranteed preconditions and required postconditions. As an underlying verification engine we use an SMT solver. To take advantage of the domain knowledge (and often, to enable verification at all), the knowledge is fed into the solver in the form of sorts, uninterpreted functions and in particular assertions as to enhance the solver’s reasoning capabilities. Thereby, we allow for deductions within a domain previously unknown to the solver. We exemplify our technique on a case study from the area of water network optimization software.},
}

@article{3180,
  author  = {Travkin, Oleg and Wehrheim, Heike and Schellhorn, Gerhard},
  title   = {Proving Linearizability of Multiset with Local Proof Obligations},
  journal = {{ECEASST}},
  year    = {2012},
}

@article{3181,
  author  = {Ruhroth, Thomas and Wehrheim, Heike},
  title   = {Model evolution and refinement},
  journal = {Sci. Comput. Program.},
  number  = {3},
  pages   = {270--289},
  doi     = {10.1016/j.scico.2011.04.007},
  year    = {2012},
}

@inproceedings{3182,
  author    = {Schellhorn, Gerhard and Wehrheim, Heike and Derrick, John},
  title     = {How to Prove Algorithms Linearisable},
  booktitle = {Computer Aided Verification - 24th International Conference, {CAV} 2012, Berkeley, CA, USA, July 7-13, 2012 Proceedings},
  editor    = {Madhusudan, P. and Seshia, Sanjit A.},
  pages     = {243--259},
  doi       = {10.1007/978-3-642-31424-7_21},
  year      = {2012},
}

@inproceedings{590,
  author    = {Wonisch, Daniel and Wehrheim, Heike},
  title     = {Predicate Analysis with Block-Abstraction Memoization},
  booktitle = {Proceedings of the 14th International Conference on Formal Engineering Methods ({ICFEM})},
  pages     = {332--347},
  doi       = {10.1007/978-3-642-34281-3_24},
  year      = {2012},
  abstract  = {Predicate abstraction is an established technique for reducing the size of the state space during verification. In this paper, we extend predicate abstraction with block-abstraction memoization (BAM), which exploits the fact that blocks are often executed several times in a program. The verification can thus benefit from caching the values of previous block analyses and reusing them upon next entry into a block. In addition to function bodies, BAM also performs well for nested loops. To further increase effectiveness, block memoization has been integrated with lazy abstraction adopting a lazy strategy for cache refinement. Together, this achieves significant performance increases: our tool (an implementation within the configurable program analysis framework CPAchecker) has won the Competition on Software Verification 2012 in the category “Overall”.},
}

@inproceedings{608,
  author    = {Timm, Nils and Wehrheim, Heike and Czech, Mike},
  title     = {Heuristic-Guided Abstraction Refinement for Concurrent Systems},
  booktitle = {Proceedings of the 14th International Conference on Formal Engineering Methods ({ICFEM})},
  pages     = {348--363},
  doi       = {10.1007/978-3-642-34281-3_25},
  year      = {2012},
  abstract  = {Predicate abstraction is an established technique in software verification. It inherently includes an abstraction refinement loop successively adding predicates until the right level of abstraction is found. For concurrent systems, predicate abstraction can be combined with spotlight abstraction, further reducing the state space by abstracting away certain processes. Refinement then has to decide whether to add a new predicate or a new process. Selecting the right predicates and processes is a crucial task: The positive effect of abstraction may be compromised by unfavourable refinement decisions. Here we present a heuristic approach to abstraction refinement. The basis for a decision is a set of refinement candidates, derived by multiple counterexample-generation. Candidates are evaluated with respect to their influence on other components in the system. Experimental results show that our technique can significantly speed up verification as compared to a naive abstraction refinement.},
}

@inproceedings{627,
  author    = {Wonisch, Daniel},
  title     = {Block Abstraction Memoization for {CPAchecker}},
  booktitle = {Proceedings of the 18th International Conference on Tools and Algorithms for the Construction and Analysis of Systems ({TACAS})},
  pages     = {531--533},
  doi       = {10.1007/978-3-642-28756-5_41},
  year      = {2012},
  abstract  = {Block Abstraction Memoization (ABM) is a technique in software model checking that exploits the modularity of programs during verification by caching. To this end, ABM records the results of block analyses and reuses them if possible when revisiting the same block again. In this paper we present an implementation of ABM into the predicate-analysis component of the software-verification framework CPAchecker. With our participation at the Competition on Software Verification we aim at providing evidence that ABM can not only substantially increase the efficiency of predicate analysis but also enables verification of a wider range of programs.},
}

@inproceedings{565,
  author    = {Besova, Galina and Walther, Sven and Wehrheim, Heike and Becker, Steffen},
  title     = {Weaving-based configuration and modular transformation of multi-layer systems},
  booktitle = {Proceedings of the 15th International Conference on Model Driven Engineering Languages \& Systems ({MoDELS})},
  pages     = {776--792},
  doi       = {10.1007/978-3-642-33666-9_49},
  year      = {2012},
  abstract  = {In model-driven development of multi-layer systems (e.g. application, platform and infrastructure), each layer is usually described by separate models. When generating analysis models or code, these separate models first of all need to be linked. Hence, existing model transformations for single layers cannot be simply re-used. In this paper, we present a modular approach to the transformation of multi-layer systems. It employs model weaving to define the interconnections between models of different layers. The weaving models themselves are subject to model transformations: The result of transforming a weaving model constitutes a configuration for the models obtained by transforming single layers, thereby allowing for a re-use of existing model transformations. We exemplify our approach by the generation of analysis models for component-based software.},
}

@article{3183,
  author  = {Schneider, Steve and Treharne, Helen and Wehrheim, Heike},
  title   = {Bounded Retransmission in Event-B{\(\parallel\)}CSP: a Case Study},
  journal = {Electr. Notes Theor. Comput. Sci.},
  pages   = {69--80},
  doi     = {10.1016/j.entcs.2011.11.019},
  year    = {2011},
}

@article{3184,
  author  = {Derrick, John and Schellhorn, Gerhard and Wehrheim, Heike},
  title   = {Mechanically verified proof obligations for linearizability},
  journal = {{ACM} Trans. Program. Lang. Syst.},
  number  = {1},
  pages   = {4:1--4:43},
  doi     = {10.1145/1889997.1890001},
  year    = {2011},
}