@inproceedings{6839, author = {{Doherty, Simon and Dongol, Brijesh and Wehrheim, Heike and Derrick, John}}, booktitle = {{32nd International Symposium on Distributed Computing, {DISC} 2018, New Orleans, LA, USA, October 15-19, 2018}}, pages = {{45:1--45:3}}, title = {{{Brief Announcement: Generalising Concurrent Correctness to Weak Memory}}}, doi = {{10.4230/LIPIcs.DISC.2018.45}}, year = {{2018}}, } @article{1043, abstract = {{Approximate computing (AC) is an emerging paradigm for energy-efficient computation. The basic idea of AC is to sacrifice high precision for low energy by allowing hardware to carry out “approximately correct” calculations. This provides a major challenge for software quality assurance: programs successfully verified to be correct might be erroneous on approximate hardware. In this letter, we present a novel approach for determining under what conditions a software verification result is valid for approximate hardware. To this end, we compute the allowed tolerances for AC hardware from successful verification runs. More precisely, we derive a set of constraints which—when met by the AC hardware—guarantees the verification result to carry over to AC. On the practical side, we furthermore: 1) show how to extract tolerances from verification runs employing predicate abstraction as verification technology and 2) show how to check such constraints on hardware designs. We have implemented all techniques, and exemplify them on example C programs and a number of recently proposed approximate adders.}}, author = {{Isenberg, Tobias and Jakobs, Marie-Christine and Pauck, Felix and Wehrheim, Heike}}, issn = {{1943-0663}}, journal = {{IEEE Embedded Systems Letters}}, pages = {{22--25}}, publisher = {{Institute of Electrical and Electronics Engineers (IEEE)}}, title = {{{Validity of Software Verification Results on Approximate Hardware}}}, doi = {{10.1109/LES.2017.2758200}}, year = {{2018}}, } @inproceedings{1096, author = {{Beyer, Dirk and Jakobs, Marie-Christine and Lemberger, Thomas and Wehrheim, Heike}}, booktitle = {{Proceedings of the 40th International Conference on Software Engineering (ICSE)}}, location = {{Gothenburg, Sweden}}, pages = {{1182--1193}}, publisher = {{ACM}}, title = {{{Reducer-Based Construction of Conditional Verifiers}}}, year = {{2018}}, } @misc{3512, author = {{Börding, Paul}}, publisher = {{Universität Paderborn}}, title = {{{Testing Java Method Contracts}}}, year = {{2017}}, } @inproceedings{3156, author = {{König, Jürgen and Wehrheim, Heike}}, booktitle = {{Theoretical Aspects of Computing - {ICTAC} 2017 - 14th International Colloquium, Hanoi, Vietnam, October 23-27, 2017, Proceedings}}, editor = {{Van Hung, Dang and Kapur, Deepak}}, pages = {{118--135}}, title = {{{Value-Based or Conflict-Based? Opacity Definitions for STMs}}}, doi = {{10.1007/978-3-319-67729-3_8}}, year = {{2017}}, } @inproceedings{114, abstract = {{Proof witnesses are proof artifacts showing correctness of programs wrt. safety properties.
The recent past has seen a rising interest in witnesses as (a) proofs in a proof-carrying-code context, (b) certificates for the correct functioning of verification tools, or simply (c) exchange formats for (partial) verification results. As witnesses in all these scenarios need to be stored and processed, witnesses are required to be as small as possible. However, software verification tools – the prime suppliers of witnesses – do not necessarily construct small witnesses. In this paper, we present a formal account of proof witnesses. We introduce the concept of weakenings, reducing the complexity of proof witnesses while preserving the ability of witnessing safety. We develop a weakening technique for a specific class of program analyses, and prove it to be sound. Finally, we experimentally demonstrate our weakening technique to indeed achieve a size reduction of proof witnesses.}}, author = {{Jakobs, Marie-Christine and Wehrheim, Heike}}, booktitle = {{NASA Formal Methods: 9th International Symposium}}, editor = {{Barrett, Clark and Davies, Misty and Kahsai, Temesghen}}, pages = {{389--403}}, title = {{{Compact Proof Witnesses}}}, doi = {{10.1007/978-3-319-57288-8_28}}, year = {{2017}}, } @inproceedings{115, abstract = {{Whenever customers have to decide between different instances of the same product, they are interested in buying the best product. In contrast, companies are interested in reducing the construction effort (and usually, as a consequence thereof, the quality) to gain profit. The described setting is widely known as opposed preferences in quality of the product and also applies to the context of service-oriented computing. In general, service-oriented computing emphasizes the construction of large software systems out of existing services, where services are small and self-contained pieces of software that adhere to a specified interface. Several implementations of the same interface are considered as several instances of the same service. Thereby, customers are interested in buying the best service implementation for their service composition wrt. metrics such as costs, energy, memory consumption, or execution time. One way to ensure the service quality is to employ certificates, which can come in different kinds: Technical certificates proving correctness can be automatically constructed by the service provider and again be automatically checked by the user. Digital certificates allow proof of the integrity of a product. Other certificates might be rolled out if service providers follow a good software construction principle, which is checked in annual audits. Whereas all of these certificates are handled differently in service markets, what they have in common is that they influence the buying decisions of customers. In this paper, we review state-of-the-art developments in certification with respect to service-oriented computing.
We not only discuss how certificates are constructed and handled in service-oriented computing but also review the effects of certificates on the market from an economic perspective.}}, author = {{Jakobs, Marie-Christine and Krämer, Julia and van Straaten, Dirk and Lettmann, Theodor}}, booktitle = {{The Ninth International Conference on Advanced Service Computing (SERVICE COMPUTATION)}}, editor = {{De Barros, Marcelo and Klink, Janusz and Uhl, Tadeus and Prinz, Thomas}}, pages = {{7--12}}, title = {{{Certification Matters for Service Markets}}}, year = {{2017}}, } @article{90, abstract = {{We propose and extend an approach for the verification of safety properties for parameterized timed systems modeled as networks of timed automata. For this task, we introduce an incremental workflow that is based on our algorithm IC3 with Zones. It proceeds in a cycle in which single models of the system are verified, and the verification results are employed for the reasoning about the entire system. Starting with the smallest instances, the verification of the safety property is carried out quickly and efficiently. On successful verification, the algorithm produces an inductive strengthening of the safety property. We reuse this result and try to reason about the entire parameterized timed system. To this end, we extrapolate the inductive strengthening into a candidate for the next larger model. In case this candidate is a valid inductive strengthening for the next larger model, our main theorem reasons about all models of the parameterized timed system, stating that the safety property holds true for all models. Otherwise, the main cycle starts over with the verification of the next larger model. This workflow is iterated until we are able to reason about the entire parameterized timed system, until a counterexample trace is found, or until the single models become too large to be handled in the verification. We reuse the intermediate results in a feedback loop in order to accelerate the verification runs for the single models. Furthermore, we consider an extended formalism in comparison to our previous publications.}}, author = {{Isenberg, Tobias}}, journal = {{ACM Transactions on Embedded Computing Systems}}, number = {{2}}, pages = {{47:1--47:24}}, publisher = {{ACM}}, title = {{{Incremental Inductive Verification of Parameterized Timed Systems}}}, doi = {{10.1145/2984640}}, year = {{2017}}, } @inproceedings{5769, abstract = {{Information Flow Analysis (IFA) aims at detecting illegal flows of information between program entities. “Legality” is therein specified in terms of various security policies. For the analysis, this opens up two possibilities: building generic, policy-independent IFAs, or building specific, policy-dependent ones. While the former needs to track all dependencies between program entities, the latter allows for a reduced and thus more efficient analysis. In this paper, we start out by formally defining a policy-independent information flow analysis. Next, we show how to specialize this IFA via policy-specific variable tracking, and prove soundness of the specialization. We furthermore investigate refinement relationships between policies, allowing an IFA for one policy to be employed for its refinements.
As policy refinement depends on concrete program entities, we additionally propose a precomputation of policy refinement conditions, enabling an efficient refinement check for concrete programs.}}, author = {{Töws, Manuel and Wehrheim, Heike}}, booktitle = {{Formal Methods and Software Engineering - 19th International Conference on Formal Engineering Methods (ICFEM 2017)}}, editor = {{Duan, Zhenhua and Ong, Luke}}, isbn = {{9783319686899}}, issn = {{0302-9743}}, pages = {{362--378}}, publisher = {{Springer International Publishing}}, title = {{{Policy Dependent and Independent Information Flow Analyses}}}, doi = {{10.1007/978-3-319-68690-5_22}}, year = {{2017}}, } @phdthesis{707, author = {{Walther, Sven}}, publisher = {{Universität Paderborn}}, title = {{{Knowledge-based Verification of Service Compositions}}}, doi = {{10.17619/UNIPB/1-307}}, year = {{2017}}, } @inproceedings{71, abstract = {{Today, software verification tools have reached the maturity to be used for large-scale programs. Different tools perform differently well on varying code. A software developer is hence faced with the problem of choosing a tool appropriate for her program at hand. A ranking of tools on programs could facilitate the choice. Such rankings can, however, so far only be obtained by running all considered tools on the program. In this paper, we present a machine learning approach to predicting rankings of tools on programs. The method builds upon so-called label ranking algorithms, which we complement with appropriate kernels providing a similarity measure for programs. Our kernels employ a graph representation for software source code that mixes elements of control flow and program dependence graphs with abstract syntax trees. Using data sets from the software verification competition SV-COMP, we demonstrate our rank prediction technique to generalize well and achieve a rather high predictive accuracy (rank correlation > 0.6).}}, author = {{Czech, Mike and Hüllermeier, Eyke and Jakobs, Marie-Christine and Wehrheim, Heike}}, booktitle = {{Proceedings of the 3rd International Workshop on Software Analytics}}, pages = {{23--26}}, title = {{{Predicting Rankings of Software Verification Tools}}}, doi = {{10.1145/3121257.3121262}}, year = {{2017}}, } @techreport{72, abstract = {{Software verification competitions, such as the annual SV-COMP, evaluate software verification tools with respect to their effectiveness and efficiency. Typically, the outcome of a competition is a (possibly category-specific) ranking of the tools. For many applications, such as building portfolio solvers, it would be desirable to have an idea of the (relative) performance of verification tools on a given verification task beforehand, i.e., prior to actually running all tools on the task. In this paper, we present a machine learning approach to predicting rankings of tools on verification tasks. The method builds upon so-called label ranking algorithms, which we complement with appropriate kernels providing a similarity measure for verification tasks. Our kernels employ a graph representation for software source code that mixes elements of control flow and program dependence graphs with abstract syntax trees. Using data sets from SV-COMP, we demonstrate our rank prediction technique to generalize well and achieve a rather high predictive accuracy. In particular, our method outperforms a recently proposed feature-based approach of Demyanova et al. (when applied to rank predictions).
}}, author = {{Czech, Mike and Hüllermeier, Eyke and Jakobs, Marie-Christine and Wehrheim, Heike}}, title = {{{Predicting Rankings of Software Verification Competitions}}}, year = {{2017}}, } @article{68, abstract = {{Proof-carrying hardware (PCH) is a principle for achieving safety for dynamically reconfigurable hardware systems. The producer of a hardware module spends considerable effort when creating a proof for a safety policy. The proof is then transferred as a certificate together with the configuration bitstream to the consumer of the hardware module, who can quickly verify the given proof. Previous work utilized SAT solvers and resolution traces to set up a PCH technology and corresponding tool flows. In this article, we present a novel technology for PCH based on inductive invariants. For sequential circuits, our approach is fundamentally stronger than the previous SAT-based one since we avoid the limitations of bounded unrolling. We contrast our technology to existing ones and show that it fits into previously proposed tool flows. We conduct experiments with four categories of benchmark circuits and report consumer and producer runtime and peak memory consumption, as well as the size of the certificates and the distribution of the workload between producer and consumer. Experiments clearly show that our new induction-based technology is superior for sequential circuits, whereas the previous SAT-based technology is the better choice for combinational circuits.}}, author = {{Isenberg, Tobias and Platzner, Marco and Wehrheim, Heike and Wiersema, Tobias}}, journal = {{ACM Transactions on Design Automation of Electronic Systems}}, number = {{4}}, pages = {{61:1--61:23}}, publisher = {{ACM}}, title = {{{Proof-Carrying Hardware via Inductive Invariants}}}, doi = {{10.1145/3054743}}, year = {{2017}}, } @phdthesis{685, author = {{Jakobs, Marie-Christine}}, publisher = {{Universität Paderborn}}, title = {{{On-The-Fly Safety Checking - Customizing Program Certification and Program Restructuring}}}, doi = {{10.17619/UNIPB/1-104}}, year = {{2017}}, } @article{69, abstract = {{Today, software is traded worldwide on global markets, with apps being downloaded to smartphones within minutes or seconds. This poses, more than ever, the challenge of ensuring safety of software in the face of (1) unknown or untrusted software providers together with (2) resource-limited software consumers. The concept of Proof-Carrying Code (PCC), suggested years ago by Necula, provides one framework for securing the execution of untrusted code. PCC techniques attach safety proofs, constructed by software producers, to code. Based on the assumption that checking proofs is usually much simpler than constructing proofs, software consumers should thus be able to quickly check the safety of software. However, PCC techniques often suffer from the size of certificates (i.e., the attached proofs), making PCC techniques inefficient in practice. In this article, we introduce a new framework for the safe execution of untrusted code called Programs from Proofs (PfP). The basic assumption underlying the PfP technique is the fact that the structure of programs significantly influences the complexity of checking a specific safety property. Instead of attaching proofs to program code, the PfP technique transforms the program into an efficiently checkable form, thus guaranteeing quick safety checks for software consumers. For this transformation, the technique also uses a producer-side automatic proof of safety.
More specifically, safety proving for the software producer proceeds via the construction of an abstract reachability graph (ARG) unfolding the control-flow automaton (CFA) up to the degree necessary for simple checking. To this end, we combine different sorts of software analysis: expensive analyses incrementally determining the degree of unfolding, and cheap analyses responsible for safety checking. Out of the abstract reachability graph we generate the new program. In its CFA structure, it is isomorphic to the graph, and hence another, this time consumer-side, cheap analysis can quickly determine its safety. Like PCC, Programs from Proofs is a general framework instantiable with different sorts of (expensive and cheap) analysis. Here, we present the general framework and exemplify it by some concrete examples. We have implemented different instantiations on top of the configurable program analysis tool CPAchecker and report on experiments, in particular on comparisons with PCC techniques.}}, author = {{Jakobs, Marie-Christine and Wehrheim, Heike}}, journal = {{ACM Transactions on Programming Languages and Systems}}, number = {{2}}, pages = {{7:1--7:56}}, publisher = {{ACM}}, title = {{{Programs from Proofs: A Framework for the Safe Execution of Untrusted Software}}}, doi = {{10.1145/3014427}}, year = {{2017}}, } @misc{109, author = {{Pauck, Felix}}, publisher = {{Universität Paderborn}}, title = {{{Cooperative static analysis of Android applications}}}, year = {{2017}}, } @misc{201, author = {{Bröcher, Henrik}}, publisher = {{Universität Paderborn}}, title = {{{Evaluation von Graphpartitionierungsalgorithmen im Kontext von Konfigurierbarer Softwarezertifizierung}}}, year = {{2016}}, } @inproceedings{3157, author = {{Beringer, Steffen and Wehrheim, Heike}}, booktitle = {{Critical Systems: Formal Methods and Automated Verification - Joint 21st International Workshop on Formal Methods for Industrial Critical Systems and 16th International Workshop on Automated Verification of Critical Systems, FMICS-AVoCS 2016, Pisa, Italy, September 26-28, 2016, Proceedings}}, editor = {{
ter Beek, Maurice H. and Gnesi, Stefania and Knapp, Alexander}}, pages = {{189--204}}, title = {{{Verification of AUTOSAR Software Architectures with Timed Automata}}}, doi = {{10.1007/978-3-319-45943-1_13}}, year = {{2016}}, } @inproceedings{3158, author = {{Travkin, Oleg and Wehrheim, Heike}}, booktitle = {{Theoretical Aspects of Computing - {ICTAC} 2016 - 13th International Colloquium, Taipei, Taiwan, ROC, October 24-31, 2016, Proceedings}}, editor = {{Sampaio, Augusto and Wang, Farn}}, pages = {{3--24}}, title = {{{Verification of Concurrent Programs on Weak Memory Models}}}, doi = {{10.1007/978-3-319-46750-4_1}}, year = {{2016}}, } @inproceedings{3159, author = {{Schellhorn, Gerhard and Travkin, Oleg and Wehrheim, Heike}}, booktitle = {{Integrated Formal Methods - 12th International Conference, {IFM} 2016, Reykjavik, Iceland, June 1-5, 2016, Proceedings}}, editor = {{Huisman, Marieke}}, pages = {{193--209}}, title = {{{Towards a Thread-Local Proof Technique for Starvation Freedom}}}, doi = {{10.1007/978-3-319-33693-0_13}}, year = {{2016}}, } @inproceedings{3160, author = {{Doherty, Simon and Dongol, Brijesh and Derrick, John and Schellhorn, Gerhard and Wehrheim, Heike}}, booktitle = {{20th International Conference on Principles of Distributed Systems, {OPODIS} 2016, December 13-16, 2016, Madrid, Spain}}, editor = {{Fatourou, Panagiota and Jim{\'{e}}nez, Ernesto and Pedone, Fernando}}, pages = {{35:1--35:17}}, title = {{{Proving Opacity of a Pessimistic {STM}}}}, doi = {{10.4230/LIPIcs.OPODIS.2016.35}}, year = {{2016}}, } @article{3161, author = {{Isenberg, Tobias and Jakobs, Marie-Christine and Pauck, Felix and Wehrheim, Heike}}, journal = {{CoRR}}, title = {{{Deriving approximation tolerance constraints from verification runs}}}, year = {{2016}}, } @article{175, abstract = {{Today, service compositions often need to be assembled or changed on-the-fly, which leaves little time for quality assurance. Moreover, quality assurance is complicated by service providers only giving information on their services in terms of domain-specific concepts with only limited semantic meaning. In this paper, we propose a method for constructing service compositions based on pre-verified templates. Templates, given as workflow descriptions, are typed over a (domain-independent) template ontology defining concepts and predicates. Their meaning is defined by an abstract semantics, leaving the specific meaning of ontology concepts open, however, only up to given ontology rules. Templates are proven correct using a Hoare-style proof calculus, extended by a specific rule for service calls. Construction of service compositions amounts to instantiation of templates with domain-specific services. Correctness of an instantiation can then simply be checked by verifying that the domain ontology (a) adheres to the rules of the template ontology, and (b) fulfills the constraints of the employed template.}}, author = {{Walther, Sven and Wehrheim, Heike}}, journal = {{Science of Computer Programming}}, pages = {{2--23}}, publisher = {{Elsevier}}, title = {{{On-The-Fly Construction of Provably Correct Service Compositions - Templates and Proofs}}}, doi = {{10.1016/j.scico.2016.04.002}}, year = {{2016}}, } @inproceedings{186, abstract = {{Software verification is an established method to ensure software safety. Nevertheless, verification still often fails, either because it consumes too many resources, e.g., time or memory, or because the technique is not mature enough to verify the property.
The partial verification is then often discarded, and the validation process proceeds with techniques like testing. To enable standard testing to profit from previous, partial verification, we use a summary of the verification effort to simplify the program for subsequent testing. Our techniques use this summary to construct a residual program which only contains program paths with unproven assertions. Afterwards, the residual program can be used with standard testing tools. Our first experiments show that testing profits from the partial verification. The test effort is reduced, and combined verification and testing is faster than a complete verification.}}, author = {{Czech, Mike and Jakobs, Marie-Christine and Wehrheim, Heike}}, booktitle = {{Software Engineering 2016}}, editor = {{Knoop, Jens and Zdun, Uwe}}, pages = {{17--18}}, title = {{{Just test what you cannot verify!}}}, year = {{2016}}, } @inproceedings{224, abstract = {{In modern software development, paradigms like component-based software engineering (CBSE) and service-oriented architectures (SOA) emphasize the construction of large software systems out of existing components or services. Therein, a service is a self-contained piece of software, which adheres to a specified interface. In a model-based software design, this interface constitutes our sole knowledge of the service at design time, while service implementations are not available. Therefore, correctness checks or detection of potential errors in service compositions has to be carried out without the possibility of executing services. This challenges the usage of standard software error localization techniques for service compositions. In this paper, we review state-of-the-art approaches for error localization of software and discuss their applicability to service compositions.}}, author = {{Krämer, Julia and Wehrheim, Heike}}, booktitle = {{Proceedings of the 5th European Conference on Service-Oriented and Cloud Computing (ESOCC 2016)}}, pages = {{248--262}}, title = {{{A short survey on using software error localization for service compositions}}}, doi = {{10.1007/978-3-319-44482-6_16}}, year = {{2016}}, } @inproceedings{226, abstract = {{Error detection, localization and correction are time-intensive tasks in software development, but crucial to deliver functionally correct products. Thus, automated approaches to these tasks have been intensively studied for standard software systems. For model-based software systems, the situation is different. While error detection is still well-studied, error localization and correction is a less-studied domain. In this paper, we examine error localization and correction for models of service compositions. Based on formal definitions of error and correction in this context, we show that the classical approach of error localization and correction, i.e. first determining a set of suspicious statements and then proposing changes to these statements, is ineffective in our context. In fact, it lessens the chance to succeed in finding a correction at all. In this paper, we introduce correction proposals as a novel approach to error correction in service compositions, integrating error localization and correction in one combined step.
In addition, we provide an algorithm to compute such correction proposals automatically.}}, author = {{Krämer, Julia and Wehrheim, Heike}}, booktitle = {{Proceedings of the 1st International Workshop on Formal to Practical Software Verification and Composition (VeryComp 2016)}}, pages = {{445--457}}, title = {{{A Formal Approach to Error Localization and Correction in Service Compositions}}}, doi = {{10.1007/978-3-319-50230-4_35}}, year = {{2016}}, } @inproceedings{227, abstract = {{Information flow analysis studies the flow of data between program entities (e.g. variables), where the allowed flow is specified via security policies. Typical information flow analyses compute a conservative (over-)approximation of the flows in a program. Such an analysis may thus signal non-existing violations of the security policy. In this paper, we propose a new technique for inspecting the reported violations (counterexamples) for spuriousness. Similar to counterexample-guided abstraction refinement (CEGAR) in software verification, we use the result of this inspection to improve the next round of the analysis. We prove soundness of this scheme.}}, author = {{Töws, Manuel and Wehrheim, Heike}}, booktitle = {{Proceedings of the 18th International Conference on Formal Engineering Methods (ICFEM 2016)}}, pages = {{466--483}}, title = {{{A CEGAR Scheme for Information Flow Analysis}}}, doi = {{10.1007/978-3-319-47846-3_29}}, year = {{2016}}, } @inproceedings{170, abstract = {{We present PAndA2, an extendable static analysis tool for Android apps which examines permission-related security threats like overprivilege, existence of permission redelegation, and permission flows. PAndA2 comes along with a textual and graphical visualization of the analysis result and even supports the comparison of analysis results for different Android app versions.}}, author = {{Jakobs, Marie-Christine and Töws, Manuel and Pauck, Felix}}, booktitle = {{Workshop on Formal and Model-Driven Techniques for Developing Trustworthy Systems}}, editor = {{Ishikawa, F. and Romanovsky, A. and Troubitsyna, E.}}, title = {{{PAndA2: Analyzing Permission Use and Interplay in Android Apps (Tool Paper)}}}, year = {{2016}}, } @phdthesis{1190, author = {{Isenberg, Tobias}}, publisher = {{Universität Paderborn}}, title = {{{Induction-based Verification of Timed Systems}}}, year = {{2016}}, } @misc{162, author = {{Zhang, Guangli}}, publisher = {{Universität Paderborn}}, title = {{{Program Slicing: A Way of Separating WHILE Programs into Precise and Approximate Portions}}}, year = {{2016}}, } @misc{164, author = {{Czech, Mike}}, publisher = {{Universität Paderborn}}, title = {{{Predicting Rankings of Software Verification Tools Using Kernels for Structured Data}}}, year = {{2016}}, } @misc{133, author = {{Dewender, Markus}}, publisher = {{Universität Paderborn}}, title = {{{Verifikation von Service Kompositionen mit Spin}}}, year = {{2016}}, } @misc{134, author = {{Heinisch, Philipp}}, publisher = {{Universität Paderborn}}, title = {{{Verifikation von Service Kompositionen mit Prolog}}}, year = {{2016}}, } @inproceedings{250, abstract = {{Before execution, users should formally validate the correctness of software received from untrusted providers. To accelerate this validation, in the proof-carrying code (PCC) paradigm the provider delivers the software together with a certificate, a formal proof of the software’s correctness.
Thus, the user only checks if the attached certificate shows correctness of the delivered software. Recently, we introduced configurable program certification, a generic, PCC-based framework supporting various software analyses and safety properties. Evaluation of our framework revealed that validation suffers from certificate reading. In this paper, we present two orthogonal approaches which improve certificate validation, both reducing the impact of certificate reading. The first approach reduces the certificate size, storing information only if it cannot easily be recomputed. The second approach partitions the certificate into independently checkable parts. The trick is to read parts of the certificate while already checking previously read parts. Our experiments show that validation highly benefits from our improvements.}}, author = {{Jakobs, Marie-Christine}}, booktitle = {{Proceedings of the 13th International Conference on Software Engineering and Formal Methods (SEFM)}}, pages = {{159--174}}, title = {{{Speed Up Configurable Certificate Validation by Certificate Reduction and Partitioning}}}, doi = {{10.1007/978-3-319-22969-0_12}}, year = {{2015}}, } @inproceedings{283, abstract = {{Today, software verification is an established analysis method which can provide strong guarantees for software safety. However, the resources (time and/or memory) for an exhaustive verification are not always available, and analysis then has to resort to other techniques, like testing. Most often, the already achieved partial verification results are discarded in this case, and testing has to start from scratch. In this paper, we propose a method for combining verification and testing in which testing only needs to check the residual fraction of an uncompleted verification. To this end, the partial results of a verification run are used to construct a residual program (and residual assertions to be checked on it). The residual program can afterwards be fed into standard testing tools. The proposed technique is sound modulo the soundness of the testing procedure. Experimental results show that this combined usage of verification and testing can significantly reduce the effort for the subsequent testing.}}, author = {{Czech, Mike and Jakobs, Marie-Christine and Wehrheim, Heike}}, booktitle = {{Fundamental Approaches to Software Engineering}}, editor = {{Egyed, Alexander and Schaefer, Ina}}, pages = {{100--114}}, title = {{{Just test what you cannot verify!}}}, doi = {{10.1007/978-3-662-46675-9_7}}, year = {{2015}}, } @inproceedings{285, abstract = {{We propose an incremental workflow for the verification of parameterized systems modeled as symmetric networks of timed automata. Starting with a small number of timed automata in the network, a safety property is verified using IC3, a state-of-the-art algorithm based on induction. The result of the verification, an inductive strengthening, is reused to propose a candidate inductive strengthening for a larger network. If the candidate is valid, our main theorem states that the safety property holds for all sizes of the network of timed automata. Otherwise, the number of automata is increased and the next iteration is started with a new run of IC3. We propose and thoroughly examine optimizations to our workflow, e.g.
feedback mechanisms to speed up the run of IC3.}}, author = {{Isenberg, Tobias}}, booktitle = {{Proceedings of the 15th International Conference on Application of Concurrency to System Design (ACSD)}}, pages = {{1--9}}, title = {{{Incremental Inductive Verification of Parameterized Timed Systems}}}, doi = {{10.1109/ACSD.2015.13}}, year = {{2015}}, } @phdthesis{246, author = {{Besova, Galina}}, publisher = {{Universität Paderborn}}, title = {{{Systematic Development and Re-Use of Model Transformations}}}, year = {{2015}}, } @inproceedings{262, abstract = {{"Programs from Proofs" is a generic method which generates new programs out of correctness proofs of given programs. The technique ensures that the new and given program are behaviorally equivalent and that the new program is easily verifiable, thus serving as an alternative to proof-carrying code concepts. So far, this generic method has one instantiation that verifies type-state properties of programs. In this paper, we present a whole range of new instantiations, all based on data flow analyses. More precisely, we show how an imprecise but fast data flow analysis can be enhanced with a predicate analysis so as to yield a precise but expensive analysis. Out of the safety proofs of this analysis, we generate new programs, again behaviorally equivalent to the given ones, which are "easily verifiable" in the sense that now the data flow analysis alone can yield precise results. An experimental evaluation practically supports our claim of easy verification.}}, author = {{Jakobs, Marie-Christine and Wehrheim, Heike}}, booktitle = {{Proceedings of the 30th Annual ACM Symposium on Applied Computing}}, pages = {{1729--1736}}, title = {{{Programs from Proofs of Predicated Dataflow Analyses}}}, doi = {{10.1145/2695664.2695690}}, year = {{2015}}, } @article{290, abstract = {{Model transformation is a key concept in model-driven software engineering. The definition of model transformations is usually based on meta-models describing the abstract syntax of languages. While meta-models are thereby able to abstract from superfluous details of concrete syntax, they often lose structural information inherent in languages, like information on model elements always occurring together in particular shapes. As a consequence, model transformations cannot naturally re-use language structures, thus leading to unnecessary complexity in their development as well as in quality assurance. In this paper, we propose a new approach to model transformation development which allows simplifying the developed transformations and improving their quality via the exploitation of the languages' structures. The approach is based on context-free graph grammars and transformations defined by pairing productions of source and target grammars.
We show that such transformations have important properties: they terminate and are sound, complete, and deterministic.}}, author = {{Besova, Galina and Steenken, Dominik and Wehrheim, Heike}}, journal = {{Computer Languages, Systems & Structures}}, pages = {{116--138}}, publisher = {{Elsevier}}, title = {{{Grammar-based model transformations: Definition, execution, and quality properties}}}, doi = {{10.1016/j.cl.2015.05.003}}, year = {{2015}}, } @article{3162, author = {{Ziegert, Steffen and Wehrheim, Heike}}, journal = {{Computer Science - R & D}}, number = {{3-4}}, pages = {{303--320}}, title = {{{Temporal plans for software architecture reconfiguration}}}, doi = {{10.1007/s00450-014-0259-7}}, year = {{2015}}, } @inproceedings{3163, author = {{Meyer, Roland and Wehrheim, Heike}}, booktitle = {{Correct System Design - Symposium in Honor of Ernst-R{\"{u}}diger Olderog on the Occasion of His 60th Birthday, Oldenburg, Germany, September 8-9, 2015. Proceedings}}, editor = {{Meyer, Roland and Platzer, Andr{\'{e}} and Wehrheim, Heike}}, pages = {{3--4}}, title = {{{From Program Verification to Time and Space: The Scientific Life of Ernst-R{\"{u}}diger Olderog}}}, doi = {{10.1007/978-3-319-23506-6_1}}, year = {{2015}}, } @inproceedings{3164, author = {{Derrick, John and Dongol, Brijesh and Schellhorn, Gerhard and Travkin, Oleg and Wehrheim, Heike}}, booktitle = {{{FM} 2015: Formal Methods - 20th International Symposium, Oslo, Norway, June 24-26, 2015, Proceedings}}, editor = {{Bj{\o}rner, Nikolaj and de Boer, Frank S.}}, pages = {{161--177}}, title = {{{Verifying Opacity of a Transactional Mutex Lock}}}, doi = {{10.1007/978-3-319-19249-9_11}}, year = {{2015}}, } @inproceedings{3165, author = {{Wehrheim, Heike and Travkin, Oleg}}, booktitle = {{Hardware and Software: Verification and Testing - 11th International Haifa Verification Conference, {HVC} 2015, Haifa, Israel, November 17-19, 2015, Proceedings}}, editor = {{Piterman, Nir}}, pages = {{104--119}}, title = {{{{TSO} to {SC} via Symbolic Execution}}}, doi = {{10.1007/978-3-319-26287-1_7}}, year = {{2015}}, } @proceedings{3166, editor = {{Meyer, Roland and Platzer, Andr{\'{e}} and Wehrheim, Heike}}, isbn = {{978-3-319-23505-9}}, title = {{{Correct System Design - Symposium in Honor of Ernst-R{\"{u}}diger Olderog on the Occasion of His 60th Birthday, Oldenburg, Germany, September 8-9, 2015. Proceedings}}}, doi = {{10.1007/978-3-319-23506-6}}, year = {{2015}}, } @inproceedings{336, abstract = {{Today, service compositions often need to be assembled or changed on-the-fly, which leaves little time for quality assurance. Moreover, quality assurance is complicated by service providers only giving information on their services in terms of domain-specific concepts with only limited semantic meaning. In this paper, we propose a method to construct service compositions based on pre-verified templates. Templates, given as workflow descriptions, are typed over a (domain-independent) template ontology defining concepts and predicates. Templates are proven correct using an abstract semantics, leaving the specific meaning of ontology concepts open, however, only up to given ontology rules.
Construction of service compositions amounts to instantiation of templates with domain-specific services. Correctness of an instantiation can then simply be checked by verifying that the domain ontology (a) adheres to the rules of the template ontology, and (b) fulfills the constraints of the employed template.}}, author = {{Walther, Sven and Wehrheim, Heike}}, booktitle = {{Proceedings of the 11th International Symposium on Formal Aspects of Component Software (FACS)}}, editor = {{Lanese, Ivan and Madelaine, Eric}}, pages = {{31--48}}, title = {{{Verified Service Compositions by Template-Based Construction}}}, doi = {{10.1007/978-3-319-15317-9_3}}, year = {{2014}}, } @misc{340, author = {{Korth, Philipp}}, publisher = {{Universität Paderborn}}, title = {{{Untersuchung transitiver Eigenschaften der Technik "Programs from Proofs"}}}, year = {{2014}}, } @inproceedings{344, author = {{Becker, Matthias and Becker, Steffen and Besova, Galina and Walther, Sven and Wehrheim, Heike}}, booktitle = {{Proceedings of the 40th Euromicro Conference on Software Engineering and Advanced Applications (Work in Progress Session)}}, title = {{{Towards Systematic Configuration for Architecture Validation}}}, year = {{2014}}, } @inproceedings{353, abstract = {{There are many technologies for the automation of processes that deal with services; examples are service discovery and composition. Automation of these processes requires that the services are described semantically. However, semantically described services are currently not or only rarely available, which limits the applicability of discovery and composition approaches. The systematic support for creating new semantic services usable by automated technologies is an open problem. We tackle this problem with a template-based approach: Domain-independent templates are instantiated with domain-specific services and Boolean expressions. The obtained services have semantic descriptions whose correctness directly follows from the correctness of the template. Besides the theory, we present experimental results for a service repository in which 85% of the services were generated automatically.}}, author = {{Mohr, Felix and Walther, Sven}}, booktitle = {{Proceedings of the 14th International Conference on Software Reuse (ICSR)}}, pages = {{188--203}}, title = {{{Template-based Generation of Semantic Services}}}, doi = {{10.1007/978-3-319-14130-5_14}}, year = {{2014}}, } @inproceedings{383, abstract = {{Proof-carrying code approaches aim at safe execution of untrusted code by having the code producer attach a safety proof to the code which the code consumer only has to validate. Depending on the type of safety property, proofs can however become quite large, and their validation - though faster than their construction - is still time-consuming. In this paper we introduce a new concept for safe execution of untrusted code. It keeps the idea of putting the time-consuming part of proving on the side of the code producer, however, it attaches no proofs to code anymore but instead uses the proof to transform the program into an equivalent but more efficiently verifiable program. Code consumers thus still do proving themselves, however, on a computationally inexpensive level only.
Experimental results show that the proof effort can be reduced by several orders of magnitude, both with respect to time and space.}}, author = {{Wonisch, Daniel and Schremmer, Alexander and Wehrheim, Heike}}, booktitle = {{Proceedings of the Software Engineering Conference (SE)}}, pages = {{67--68}}, title = {{{Programs from Proofs -- Approach and Applications}}}, year = {{2014}}, }