publications.bib

@incollection{springerlink:10.1007/978-3-642-27940-9_19,
  abstract = {We consider automatic synthesis from linear temporal logic specifications for asynchronous systems. We aim for the produced reactive systems to be used as software in a multi-threaded environment. We extend a previous reduction of asynchronous synthesis to synchronous synthesis to the setting of multiple input and multiple output variables. Much like synthesis for synchronous designs, this solution is not practical, as it requires determinization of automata on infinite words and the solution of complicated games. We follow advances in the synthesis of synchronous designs, which restrict the handled specifications but achieve scalability and efficiency. We propose a heuristic that, in some cases, maintains scalability for asynchronous synthesis. Our heuristic can prove that specifications are realizable and extract designs. This is done by a reduction to synchronous synthesis that is inspired by the theoretical reduction.},
  author = {Klein, Uri and Piterman, Nir and Pnueli, Amir},
  booktitle = {Verification, Model Checking, and Abstract Interpretation},
  doi = {10.1007/978-3-642-27940-9_19},
  editor = {Kuncak, Viktor and Rybalchenko, Andrey},
  pages = {283--298},
  publisher = {Springer Berlin / Heidelberg},
  series = {Lecture Notes in Computer Science},
  title = {Effective Synthesis of Asynchronous Systems from {GR}(1) Specifications},
  url = {http://cs.nyu.edu/acsys/pubs/KPP_VMCAI12.pdf},
  volume = {7148},
  year = {2012}
}
@techreport{KPP_NYU_TR2011_944,
  abstract = {We consider automatic synthesis from linear temporal logic specifications for asynchronous systems. We aim for the produced reactive systems to be used as software in a multi-threaded environment. We extend a previous reduction of asynchronous synthesis to synchronous synthesis to the setting of multiple input and multiple output variables. Much like synthesis for synchronous designs, this solution is not practical, as it requires determinization of automata on infinite words and the solution of complicated games. We follow advances in the synthesis of synchronous designs, which restrict the handled specifications but achieve scalability and efficiency. We propose a heuristic that, in some cases, maintains scalability for asynchronous synthesis. Our heuristic can prove that specifications are realizable and extract designs. This is done by a reduction to synchronous synthesis that is inspired by the theoretical reduction.},
  author = {Klein, Uri and Piterman, Nir and Pnueli, Amir},
  institution = {Courant Institute of Mathematical Sciences, NYU},
  number = {TR2011-944},
  note = {Extended version of a VMCAI '12 paper},
  title = {Effective Synthesis of Asynchronous Systems from {GR}(1) Specifications},
  url = {http://cs.nyu.edu/web/Research/TechReports/TR2011-944/TR2011-944.pdf},
  year = {2011}
}
@inproceedings{KB11,
  abstract = {We describe an abstract methodology for exploring and categorizing the space of error traces for a system using a procedure based on Satisfiability Modulo Theories and Bounded Model Checking. A key component required by the technique is a way to generalize an error trace into a category of error traces. We describe tools and techniques to support a human expert in this generalization task. Finally, we report on a case study in which the methodology is applied to a simple version of the Traffic Alert and Collision Avoidance System.},
  author = {King, Tim and Barrett, Clark},
  booktitle = {Proceedings of the 9th International Workshop on Satisfiability Modulo Theories (SMT '11)},
  month = jul,
  title = {Exploring and Categorizing Error Spaces using {BMC} and {SMT}},
  url = {http://cs.nyu.edu/acsys/pubs/KB11.pdf},
  year = {2011}
}
@inproceedings{JB10-SMT,
  abstract = {One of the main shortcomings of the traditional methods for combining theories is the complexity of guessing the arrangement of the variables shared by the individual theories. This paper presents a reformulation of the {Nelson-Oppen} method that takes into account explicit equality propagation and can ignore pairs of shared variables that the theories do not care about. We show the correctness of the new approach and present care functions for the theories of uninterpreted functions and the theory of arrays. The effectiveness of the new method is illustrated by encouraging experimental results.},
  author = {Jovanovi\'{c}, Dejan and Barrett, Clark},
  booktitle = {Proceedings of the 8th International Workshop on Satisfiability Modulo Theories (SMT '10)},
  month = jul,
  title = {Sharing is Caring},
  url = {http://cs.nyu.edu/acsys/pubs/JB10-SMT.pdf},
  year = {2010}
}
@inproceedings{Jovanovic2010Polite,
  abstract = {The classic method of Nelson and Oppen for combining decision procedures requires the theories to be stably-infinite. Unfortunately, some important theories do not fall into this category (e.g. the theory of bit-vectors). To remedy this problem, previous work introduced the notion of polite theories. Polite theories can be combined with any other theory using an extension of the {Nelson-Oppen} approach. In this paper we revisit the notion of polite theories, fixing a subtle flaw in the original definition. We give a new combination theorem which specifies the degree to which politeness is preserved when combining polite theories. We also give conditions under which politeness is preserved when instantiating theories by identifying two sorts. These results lead to a more general variant of the theorem for combining multiple polite theories.},
  address = {Berlin, Heidelberg},
  author = {Jovanovi\'{c}, Dejan and Barrett, Clark},
  booktitle = {Proceedings of the 17th International Conference on Logic for Programming, Artificial Intelligence, and Reasoning (LPAR '10)},
  doi = {10.1007/978-3-642-16242-8},
  isbn = {3-642-16241-X, 978-3-642-16241-1},
  location = {Yogyakarta, Indonesia},
  pages = {402--416},
  publisher = {Springer-Verlag},
  series = {Lecture Notes in Computer Science},
  title = {Polite Theories Revisited},
  url = {http://cs.nyu.edu/acsys/pubs/JB10.pdf},
  year = {2010}
}
@incollection{cvc4,
  abstract = {{CVC4} is the latest version of the Cooperating Validity Checker. A joint project of {NYU} and U Iowa, {CVC4} aims to support the useful feature set of {CVC3} and {SMT}-{LIBv2} while optimizing the design of the core system architecture and decision procedures to take advantage of recent engineering and algorithmic advances. {CVC4} represents a completely new code base; it is a from-scratch rewrite of {CVC3}, and many subsystems have been completely redesigned. Additional decision procedures for {CVC4} are currently under development, but for what it currently achieves, it is a lighter-weight and higher-performing tool than {CVC3}. We describe the system architecture and subsystems of note, and discuss some applications and continuing work.},
  address = {Berlin, Heidelberg},
  author = {Barrett, Clark and Conway, Christopher and Deters, Morgan and Hadarean, Liana and Jovanovi\'{c}, Dejan and King, Tim and Reynolds, Andrew and Tinelli, Cesare},
  chapter = {14},
  doi = {10.1007/978-3-642-22110-1_14},
  editor = {Gopalakrishnan, Ganesh and Qadeer, Shaz},
  isbn = {978-3-642-22109-5},
  booktitle = {Computer Aided Verification},
  pages = {171--177},
  publisher = {Springer Berlin / Heidelberg},
  series = {Lecture Notes in Computer Science},
  title = {{CVC4}},
  url = {http://cs.nyu.edu/acsys/pubs/BCD+11.pdf},
  volume = {6806},
  year = {2011}
}
@incollection{KN11,
  abstract = {{REST} is a software architectural style used for the design of highly scalable web applications. Interest in {REST} has grown rapidly over the past decade, spurred by the growth of open web {APIs}. On the other hand, there is also considerable confusion surrounding {REST}: many examples of supposedly {RESTful} {APIs} violate key {REST} constraints. We show that the constraints of {REST} and of {RESTful} {HTTP} can be precisely formulated within temporal logic. This leads to methods for model checking and run-time verification of {RESTful} behavior. We formulate several relevant verification questions and analyze their complexity.},
  author = {Klein, Uri and Namjoshi, Kedar S.},
  booktitle = {Computer Aided Verification},
  doi = {10.1007/978-3-642-22110-1_43},
  editor = {Gopalakrishnan, Ganesh and Qadeer, Shaz},
  pages = {541--556},
  publisher = {Springer Berlin / Heidelberg},
  series = {Lecture Notes in Computer Science},
  title = {Formalization and Automated Verification of {RESTful} Behavior},
  url = {http://cs.nyu.edu/acsys/pubs/KN_CAV11.pdf},
  volume = {6806},
  year = {2011}
}
@techreport{KN_NYU_TR2011_938,
  abstract = {{REST} is a software architectural style used for the design of highly scalable web applications. Interest in {REST} has grown rapidly over the past decade, spurred by the growth of open web {APIs}. On the other hand, there is also considerable confusion surrounding {REST}: many examples of supposedly {RESTful} {APIs} violate key {REST} constraints. We show that the constraints of {REST} and of {RESTful} {HTTP} can be precisely formulated within temporal logic. This leads to methods for model checking and run-time verification of {RESTful} behavior. We formulate several relevant verification questions and analyze their complexity.},
  author = {Klein, Uri and Namjoshi, Kedar S.},
  institution = {Courant Institute of Mathematical Sciences, NYU},
  number = {TR2011-938},
  note = {Extended version of a CAV '11 paper},
  title = {Formalization and Automated Verification of {RESTful} Behavior},
  url = {http://cs.nyu.edu/web/Research/TechReports/TR2011-938/TR2011-938.pdf},
  year = {2011}
}
@incollection{KP10,
  abstract = {The last few years have seen a rising interest in the problem
of synthesizing systems from temporal logic specifications. One major
contributor to this is the recent work of Piterman et al., which showed
how polynomial-time synthesis could be achieved for a class of {LTL}
specifications that is large enough and expressive enough to cover an
extensive number of complex, real-world applications (despite a known
doubly-exponential time lower bound for general {LTL} formulae). That
approach has already been used extensively for the synthesis of various
applications and as a basis for further theoretical work on synthesis.

Here, we expose a fundamental flaw in the initial processing of specifications
in that paper and demonstrate how it may produce incorrect
results, declaring that specifications could not be synthesized when, in
fact, they could. We then identify a class of specifications for which this
initial processing is sound and complete, giving insight into why the
problem arises in the first place. We also show that whether specifications
belong to the sound and complete class can easily be checked using the
same synthesis techniques. Finally, for specifications that do not fall
into this class, we show how to modify them so that their processing is,
indeed, both sound and complete.},
  author = {Klein, Uri and Pnueli, Amir},
  booktitle = {Hardware and Software: Verification and Testing (Proceedings of HVC '10)},
  doi = {10.1007/978-3-642-19583-9_16},
  editor = {Barner, Sharon and Harris, Ian and Kroening, Daniel and Raz, Orna},
  pages = {161--181},
  publisher = {Springer Berlin / Heidelberg},
  series = {Lecture Notes in Computer Science},
  title = {Revisiting Synthesis of {GR}(1) Specifications},
  url = {http://cs.nyu.edu/acsys/pubs/KP_HVC10.pdf},
  volume = {6504},
  year = {2011}
}
@incollection{Conway2010Verifying,
  abstract = {For efficiency and portability, network packet processing code is typically written in low-level languages and makes use of bit-level operations to compactly represent data. Although packet data is highly structured, low-level implementation details make it difficult to verify that the behavior of the code is consistent with high-level data invariants. We introduce a new approach to the verification problem, using a high-level definition of packet types as part of a specification rather than an implementation. The types are not used to check the code directly; rather, the types introduce functions and predicates that can be used to assert the consistency of code with programmer-defined data assertions. We describe an encoding of these types and functions using the theories of inductive datatypes, bit vectors, and arrays in the {CVC} {SMT} solver. We present a case study in which the method is applied to open-source networking code and verified within the Cascade verification platform.},
  address = {Berlin, Heidelberg},
  author = {Conway, Christopher and Barrett, Clark},
  booktitle = {Computer Aided Verification},
  chapter = {28},
  doi = {10.1007/978-3-642-14295-6_28},
  editor = {Touili, Tayssir and Cook, Byron and Jackson, Paul},
  isbn = {978-3-642-14294-9},
  pages = {306--320},
  publisher = {Springer Berlin / Heidelberg},
  series = {Lecture Notes in Computer Science},
  title = {Verifying {Low-Level} Implementations of {High-Level} Datatypes},
  url = {http://cs.nyu.edu/acsys/pubs/CB10.pdf},
  volume = {6174},
  year = {2010}
}
@inproceedings{PK09,
  abstract = {The paper investigates a development process for reactive programs, in which the program is automatically generated (synthesized) from a high-level temporal specification. The method is based on previous results that proposed a similar synthesis method for the automatic construction of hardware designs from their temporal specifications. Thus, the work reported here can be viewed as a generalization of existing methods for the synthesis of synchronous reactive systems into the synthesis of asynchronous systems.

In the synchronous case it was possible to identify a restricted subclass of formulas and present an algorithm that solves the synthesis problem for these restricted specifications in polynomial time. Here the results are less definitive in the sense that we can offer some heuristics that may provide polynomial-time solutions only in some of the cases.},
  author = {Pnueli, Amir and Klein, Uri},
  booktitle = {Proceedings of the 7th IEEE/ACM International Conference on Formal Methods and Models for Co-Design (MEMOCODE '09)},
  doi = {10.1109/MEMCOD.2009.5185372},
  month = jul,
  pages = {1--7},
  title = {Synthesis of Programs from Temporal Property Specifications},
  url = {http://cs.nyu.edu/acsys/pubs/PK_MEMOCODE09.pdf},
  year = {2009}
}
@inproceedings{ReyHTGSB-SMT-10,
  abstract = {{LFSC} is a high-level declarative language for defining proof systems and proof objects for virtually any logic. One of its distinguishing features is its support for computational side conditions on proof rules. Side conditions facilitate the design of proof systems that reflect closely the sort of high-performance inferences made by {SMT} solvers. This paper investigates the issue of balancing declarative and computational inference in {LFSC} focusing on (quantifier-free) Linear Real Arithmetic. We discuss a few alternative proof systems for {LRA} and report on our comparative experimental results on generating and checking proofs in them.},
  author = {Reynolds, Andrew and Hadarean, Liana and Tinelli, Cesare and Ge, Yeting and Stump, Aaron and Barrett, Clark},
  address = {Edinburgh, UK},
  booktitle = {Proceedings of the 8th International Workshop on Satisfiability Modulo Theories (SMT '10)},
  editor = {Gupta, A. and Kroening, D.},
  title = {Comparing Proof Systems for Linear Real Arithmetic with {LFSC}},
  url = {http://cs.nyu.edu/acsys/pubs/RHTGSB.pdf},
  year = {2010}
}
@article{ZPFG03,
  abstract = {There is a growing awareness, both in industry and academia, of the crucial role of formally
verifying the translation from high-level source-code into low-level object code that is typically performed
by an optimizing compiler. Formally verifying an optimizing compiler, as one would verify any other large
program, is not feasible due to its size, ongoing evolution and modification, and, possibly, proprietary
considerations. Translation validation is a novel approach that offers an alternative to the verification of
translators in general and compilers in particular: Rather than verifying the compiler itself, one constructs
a validation tool which, after every run of the compiler, formally confirms that the target code produced
in the run is a correct translation of the source program. The paper presents {VOC}, a methodology for the
translation validation of optimizing compilers. We distinguish between structure preserving optimizations,
for which we establish a simulation relation between the source and target code based on computational
induction, and structure modifying optimizations, for which we develop specialized "permutation rules".
The paper also describes {VOC}-64, a prototype translation validator tool that automatically produces
verification conditions for the global optimizations of the {SGI} Pro-64 compiler.},
  author = {Zuck, Lenore and Pnueli, Amir and Fang, Yi and Goldberg, Benjamin},
  doi = {10.3217/jucs-009-03-0223},
  journal = {Journal of Universal Computer Science},
  number = {3},
  title = {{VOC}: A Methodology for the Translation Validation of Optimizing Compilers},
  url = {http://cs.nyu.edu/acsys/pubs/main.pdf},
  volume = {9},
  year = {2003}
}
@inproceedings{fmcad07,
  abstract = {We show how to verify the correctness of transactional memory implementations with a model checker. We show how to specify
transactional memory in terms of the admissible interchange of transaction operations, and give proof rules for showing that an implementation satisfies this specification. This notion of an
admissible interchange is a key to our ability to use a model checker, and lets us capture the various notions of transaction conflict as characterized by Scott. We demonstrate our work using
the {TLC} model checker to verify several well-known implementations described abstractly in the {TLA}+ specification
language.},
  author = {Cohen, Ariel and O'Leary, John W. and Pnueli, Amir and Tuttle, Mark R. and Zuck, Lenore D.},
  booktitle = {Proceedings of the 7th International Conference on Formal Methods in Computer-Aided Design (FMCAD)},
  doi = {10.1109/FAMCAD.2007.40},
  month = nov,
  pages = {37--44},
  title = {Verifying Correctness of Transactional Memories},
  url = {http://cs.nyu.edu/acsys/pubs/cohen-TransactionalMemory1.pdf},
  year = {2007}
}
@misc{transact08,
  abstract = {Transactional memory is a programming abstraction intended to simplify the synchronization of conflicting memory accesses (by concurrent threads) without the difficulties associated with locks. In previous work we presented a formal framework for proving that a transactional memory implementation satisfies its specification, and provided model-checking verification of some implementations using small instantiations. This paper extends the previous work to capture non-transactional accesses to memory, which occur, for example, when using legacy code. We provide a mechanical proof of the soundness of the verification method, as well as a mechanical verification of a version of the popular {TCC} implementation that includes non-transactional memory accesses. The verification is performed by the deductive temporal checker {TLPVS}.},
  author = {Cohen, A. and Pnueli, A. and Zuck, L. D.},
  month = feb,
  title = {Verification of Transactional Memories that support {Non-Transactional} Memory Accesses},
  url = {http://cs.nyu.edu/acsys/pubs/main1.pdf},
  year = {2008}
}
@inproceedings{HBG+05,
  abstract = {Translation validation is a technique for ensuring that a translator, such as a compiler, produces correct results. Because complete verification of the translator itself is often infeasible, translation validation advocates coupling the verification task with the translation task, so that each run of the translator produces verification conditions which, if valid, prove the correctness of the translation. In previous work, the translation validation approach was used to give a framework for proving the correctness of a variety of compiler optimizations, with a recent focus on loop transformations. However, some of these ideas were preliminary and had not been implemented. Additionally, there were examples of common loop transformations which could not be handled by our previous approaches. This paper addresses these issues. We introduce a new rule {REDUCE} for loop reduction transformations, and we generalize our previous rule {VALIDATE} so that it can handle more transformations involving loops. We then describe how all of this (including some previous theoretical work) is implemented in our compiler validation tool {TVOC}.},
  author = {Hu, Ying and Barrett, Clark and Goldberg, Benjamin and Pnueli, Amir},
  booktitle = {Proceedings of the 4th International Workshop on Compiler Optimization meets Compiler Verification (COCV '05)},
  doi = {10.1016/j.entcs.2005.02.044},
  editor = {Knoop, J. and Necula, G. C. and Zimmermann, W.},
  month = dec,
  pages = {69--84},
  publisher = {Elsevier},
  series = {Electronic Notes in Theoretical Computer Science},
  title = {Validating More Loop Optimizations},
  url = {http://cs.nyu.edu/acsys/pubs/HBG+05.ps},
  volume = {141},
  number = {2},
  year = {2005}
}
@inproceedings{BFG+05,
  abstract = {We describe a compiler validation tool called {TVOC}, that uses the translation validation approach to check the validity of compiler optimizations: for a given 
source program, {TVOC} proves the equivalence of the source code and the target 
code produced by running the compiler. {TVOC} accepts as input the source and target representations of the code in the {WHIRL} intermediate representation format. Then, {TVOC} generates a set of verification conditions sufficient to show 
that the two representations are equivalent.  These verification conditions are validated using the automatic theorem prover {CVC} Lite.  There are two phases to the verification process: the first phase verifies loop transformations using the proof rule {PERMUTE}; the second phase verifies structure-preserving optimizations using the proof rule {VALIDATE}.  In this paper, we briefly describe 
{TVOC} and illustrate its use with a simple example.},
  author = {Barrett, Clark and Fang, Yi and Goldberg, Ben and Hu, Ying and Pnueli, Amir and Zuck, Lenore},
  booktitle = {Proceedings of the 17th International Conference on Computer Aided Verification (CAV '05)},
  doi = {10.1007/11513988_29},
  editor = {Etessami, Kousha and Rajamani, Sriram K.},
  month = jul,
  pages = {291--295},
  publisher = {Springer-Verlag},
  series = {Lecture Notes in Computer Science},
  title = {{TVOC}: A Translation Validator for Optimizing Compilers},
  url = {http://cs.nyu.edu/acsys/pubs/tvoc-cav05.pdf},
  volume = {3576},
  year = {2005}
}
@inproceedings{PZTVIO06,
  abstract = {Translation Validation is an approach to ensuring compilation correctness in which each
compiler run is followed by a validation pass that proves that the target code  produced by 
the compiler is a correct translation (implementation) of the source code.

In this work, we extend the existing framework for translation validation of optimizing 
compilers to deal with procedural programs and define the notion of correct translation 
for reactive programs with intermediate inputs and outputs. We present an Interprocedural 
Translation Validation algorithm that automatically proves correct translation of the source 
program to the target program in the presence of interprocedural optimizations such as global
constant propagation, inlining, tail-recursion elimination, interprocedural dead code elimination, 
dead argument elimination, and cloning.},
  author = {Pnueli, Amir and Zaks, Anna},
  booktitle = {Proceedings of the 4th International Workshop on Software Verification and Validation (SVV 2006)},
  title = {Translation Validation of Interprocedural Optimizations},
  url = {http://cs.nyu.edu/acsys/pubs/TVIO.pdf},
  year = {2006}
}
@article{ZPG+05,
  abstract = {This paper presents new approaches to the validation of loop optimizations that compilers use to obtain the highest performance from modern architectures. Rather than verify the compiler, the approach of translation validation performs a validation check after every run of the compiler, producing a formal proof that the produced target code is a correct implementation of the source code.

As part of an active and ongoing research project on translation validation, we have previously described approaches for validating optimizations that preserve the loop structure of the code and have presented a simulation-based general technique for validating such optimizations. In this paper, for more aggressive optimizations that alter the loop structure of the code -- such as distribution, fusion, tiling, and interchange -- we present a set of permutation rules which establish that the transformed code satisfies all the implied data dependencies necessary for the validity of the considered transformation. We describe the extensions to our tool {VOC}-64 which are required to validate these structure-modifying optimizations.

This paper also discusses preliminary work on run-time validation of speculative loop optimizations. This involves using run-time tests to ensure the correctness of loop optimizations whose correctness cannot be guaranteed at compile time. Unlike compiler validation, run-time validation must not only determine when an optimization has generated incorrect code, but also recover from the optimization without aborting the program or producing an incorrect result. This technique has been applied to several loop optimizations, including loop interchange and loop tiling, and appears to be quite promising.},
  author = {Zuck, Lenore and Pnueli, Amir and Goldberg, Benjamin and Barrett, Clark and Fang, Yi and Hu, Ying},
  doi = {10.1007/s10703-005-3402-z},
  journal = {Formal Methods in System Design (FMSD)},
  month = nov,
  number = {3},
  pages = {335--360},
  publisher = {Springer Netherlands},
  title = {Translation and {Run-Time} Validation of Loop Transformations},
  url = {http://cs.nyu.edu/acsys/pubs/ZPG+05.ps},
  volume = {27},
  year = {2005}
}
@inproceedings{HBG04,
  abstract = {Translation validation is a technique that verifies the results of every run of a translator, such as a compiler, instead of the translator itself. Previous papers by the authors and others have described translation validation for compilers that perform loop optimizations (such as interchange, tiling, fusion, etc.), using a proof rule that treats loop optimizations as permutations. In this paper, we describe an improved permutation proof rule which considers the initial conditions and invariant conditions of the loop. This new proof rule not only improves the validation process for compile-time optimizations, it can also be used to ensure the correctness of speculative loop optimizations, the aggressive optimizations which are only correct under certain conditions that cannot be known at compile time. Based on the new permutation rule, with the help of an automatic theorem prover, {CVC} Lite, an algorithm is proposed for validating loop optimizations. The same permutation proof rule can also be used (within a compiler, for example) to generate the run-time tests necessary to support speculative optimizations.},
  author = {Hu, Ying and Barrett, Clark and Goldberg, Benjamin},
  booktitle = {Proceedings of the 2nd IEEE International Conference on Software Engineering and Formal Methods (SEFM '04)},
  doi = {10.1109/SEFM.2004.45},
  month = sep,
  pages = {281--289},
  publisher = {IEEE Computer Society},
  title = {Theory and Algorithms for the Generation and Validation of Speculative Loop Optimizations},
  url = {http://cs.nyu.edu/acsys/pubs/HBG04.ps},
  year = {2004}
}
@inproceedings{KHPLB05,
  abstract = {We provide semantics for the powerful scenario-based language of live sequence charts
({LSCs}). We show how the semantics of live sequence charts can be captured using
temporal logic. This is done by studying various subsets of the {LSC} language and
providing an explicit translation into temporal logic. We show how a kernel subset of the
{LSC} language (which omits variables, for example) can be embedded within the temporal
logic {CTL}*. For this kernel subset the embedding is a strict inclusion. We show that
existential charts can be expressed using the branching temporal logic {CTL} while
universal charts are in the intersection of linear temporal logic and branching temporal
logic, {LTL} $\cap$ {CTL}. Since our translations are efficient, the work described here may be
used in the development of tools for analyzing and executing scenario-based
requirements and for verifying systems against such requirements.},
  author = {Kugler, Hillel and Harel, David and Pnueli, Amir and Lu, Yuan and Bontemps, Yves},
  booktitle = {Proceedings of the 11th International Conference on Tools and Algorithms for the Construction and Analysis of Systems (TACAS '05)},
  doi = {10.1007/978-3-540-31980-1_29},
  title = {Temporal Logic for {Scenario-Based} Specifications},
  url = {http://cs.nyu.edu/acsys/pubs/tacas05.pdf},
  year = {2005}
}
@inproceedings{KPP07,
  abstract = {Live Sequence Charts ({LSCs}) is a scenario-based language for modeling object-based reactive systems with
liveness properties.  A tool called the {Play-Engine} allows a user to create {LSCs}
using a point-and-click interface and view certain executable traces of 
{LSC} specifications using a feature called smart play-out.  The user can play the 
role of an external environment by choosing an environment input action and watching the system respond
with a sequence of system actions called a super-step.  If at least one super-step exists which does not violate
the requirements, smart play-out chooses one arbitrarily and executes it.

We are interested in constructing reactive software systems that are guaranteed to never violate the
{LSC} requirements.  Non-violation is not guaranteed using smart play-out because it may
choose a super-step leading to a state from which no further super-steps exist.
In this work, we consider an extension to smart play-out that performs a complete analysis
of the state space and guarantees a system strategy for non-violation, provided a strategy exists.
Using this method, we may synthesize correct executable programs directly from {LSC} requirements.},
  author = {Kugler, Hillel and Plock, Cory and Pnueli, Amir},
  booktitle = {Companion to the 22nd ACM SIGPLAN Conference on Object-Oriented Programming Systems and Applications (OOPSLA '07)},
  doi = {10.1145/1297846.1297895},
  pages = {801--802},
  title = {Synthesizing Reactive Systems from {LSC} Requirements Using the {Play-Engine}},
  url = {http://cs.nyu.edu/acsys/pubs/p801.pdf},
  year = {2007}
}
@techreport{BNO+06-TR,
  abstract = {Lazy algorithms for Satisfiability Modulo Theories ({SMT}) combine a generic {DPLL}-based {SAT} engine with a theory solver for the given theory T that can decide the T-consistency of conjunctions of ground literals. For many theories of interest, theory solvers need to reason by performing internal case splits. Here we argue that it is more convenient to delegate these case splits to the {DPLL} engine instead. The delegation can be done on demand for solvers that can encode their internal case splits into one or more clauses, possibly including new constants and literals. This results in drastically simpler theory solvers. We present this idea in an improved version of {DPLL}(T), a general {SMT} architecture for the lazy approach, and formalize and prove it correct in an extension of Abstract {DPLL} Modulo Theories, a framework for modeling and reasoning about lazy algorithms for {SMT}. A remarkable additional feature of the architecture, also discussed in the paper, is that it naturally includes an efficient {Nelson-Oppen}-like combination of multiple theories and their solvers.},
  author = {Barrett, Clark and Nieuwenhuis, Robert and Oliveras, Albert and Tinelli, Cesare},
  institution = {Department of Computer Science, University of Iowa},
  month = aug,
  number = {06-05},
  title = {Splitting on Demand in {SAT} Modulo Theories (Technical Report)},
  url = {http://cs.nyu.edu/acsys/pubs/BNO+06-TR.pdf},
  year = {2006}
}
@inproceedings{BNO+06,
  abstract = {Lazy algorithms for Satisfiability Modulo Theories ({SMT}) combine a generic {DPLL}-based {SAT} engine with a theory solver for the given theory T that can decide the T-consistency of conjunctions of ground literals. For many theories of interest, theory solvers need to reason by performing internal case splits. Here we argue that it is more convenient to delegate these case splits to the {DPLL} engine instead. The delegation can be done on demand for solvers that can encode their internal case splits into one or more clauses, possibly including new constants and literals. This results in drastically simpler theory solvers. We present this idea in an improved version of {DPLL}(T), a general {SMT} architecture for the lazy approach, and formalize and prove it correct in an extension of Abstract {DPLL} Modulo Theories, a framework for modeling and reasoning about lazy algorithms for {SMT}. A remarkable additional feature of the architecture, also discussed in the paper, is that it naturally includes an efficient {Nelson-Oppen}-like combination of multiple theories and their solvers.},
  author = {Barrett, Clark and Nieuwenhuis, Robert and Oliveras, Albert and Tinelli, Cesare},
  booktitle = {Proceedings of the 13th International Conference on Logic for Programming, Artificial Intelligence, and Reasoning (LPAR '06)},
  doi = {10.1007/11916277_35},
  editor = {Hermann, Miki and Voronkov, Andrei},
  month = nov,
  pages = {512--526},
  publisher = {Springer-Verlag},
  series = {Lecture Notes in Computer Science},
  title = {Splitting on Demand in {SAT} Modulo Theories},
  url = {http://cs.nyu.edu/acsys/pubs/BNO+06.ps},
  volume = {4246},
  year = {2006}
}
@inproceedings{GBT07,
  abstract = {First order logic provides a convenient formalism for describing a wide variety
of verification conditions.  Two main approaches to checking such conditions
are pure first order automated theorem proving ({ATP}) and automated theorem proving based on satisfiability modulo theories ({SMT}).  Traditional {ATP} systems are designed to handle quantifiers easily, but often have difficulty reasoning with respect to theories.  {SMT} systems, on the other hand, have built-in support for many useful theories, but have a much more difficult time with quantifiers.  One clue on how to get the best of both worlds can be found in the legacy system Simplify which combines built-in theory reasoning with quantifier instantiation heuristics.  Inspired by Simplify and motivated by a desire to provide a competitive alternative to {ATP} systems, this paper describes a methodology for reasoning about quantifiers in {SMT} systems.  We present the methodology in the context of the Abstract {DPLL} Modulo Theories framework.  Besides adapting many of Simplify's techniques, we also introduce a number of new heuristics.  Most important is the notion of instantiation level which provides an effective mechanism for prioritizing and managing the large search space inherent in quantifier instantiation techniques.  These techniques have been implemented in the {SMT} system {CVC3}.  Experimental results show that our methodology enables {CVC3} to solve a significant number of benchmarks that were not solvable with any previous approach.},
  author = {Ge, Yeting and Barrett, Clark and Tinelli, Cesare},
  booktitle = {Proceedings of the 21st International Conference on Automated Deduction (CADE '07)},
  doi = {10.1007/978-3-540-73595-3_12},
  editor = {Pfenning, Frank},
  month = jul,
  pages = {167--182},
  publisher = {Springer-Verlag},
  series = {Lecture Notes in Artificial Intelligence},
  title = {Solving Quantified Verification Conditions using Satisfiability Modulo Theories},
  url = {http://cs.nyu.edu/acsys/pubs/GBT07.ps},
  volume = {4603},
  year = {2007}
}
@inproceedings{BdMS05,
  abstract = {A report on the first Satisfiability Modulo Theories Competition ({SMT-COMP} 2005).},
  author = {Barrett, Clark and de Moura, Leonardo and Stump, Aaron},
  booktitle = {Proceedings of the 17th International Conference on Computer Aided Verification (CAV '05)},
  doi = {10.1007/11513988_4},
  editor = {Etessami, Kousha and Rajamani, Sriram K.},
  month = jul,
  pages = {20--23},
  publisher = {Springer-Verlag},
  series = {Lecture Notes in Computer Science},
  title = {{SMT-COMP}: Satisfiability Modulo Theories Competition},
  url = {http://cs.nyu.edu/acsys/pubs/BdMS05.ps},
  volume = {3576},
  year = {2005}
}
@inproceedings{BPZ07,
  abstract = {We define the class of single-parent heap systems, which rely
on a singly-linked heap in order to model destructive updates on
tree structures.  This encoding has the advantage of relying on a
relatively simple theory of linked lists in order to support
abstraction computation. To facilitate the application of this
encoding, we provide a program transformation that, given a program
operating on a multi-linked heap without sharing, transforms it into
one over a single-parent heap. It is then possible to apply shape
analysis by predicate and ranking abstraction as in
\cite{BPZ05}. The technique has been successfully applied to
examples with trees of fixed arity (balancing of and insertion into
a binary sort tree).},
  author = {Balaban, Ittai and Pnueli, Amir and Zuck, Lenore},
  booktitle = {Proceedings of the 8th International Conference on Verification, Model Checking, and Abstract Interpretation (VMCAI '07)},
  doi = {10.1007/978-3-540-69738-1_7},
  editor = {Cook, Byron and Podelski, Andreas},
  publisher = {Springer-Verlag},
  series = {Lecture Notes in Computer Science},
  title = {Shape Analysis of {Single-Parent} Heaps},
  url = {http://cs.nyu.edu/acsys/pubs/single-parent.pdf},
  year = {2007}
}
@inproceedings{BPZ05,
  abstract = {The paper presents an approach for shape analysis based on predicate
abstraction. Using a predicate base which involves reachability
relations between program variables pointing into the heap, we are
able to analyze functional properties of programs with destructive
heap updates, such as list reversal, and various in-place list
sorts. The approach allows verification of both safety and liveness
properties. The abstraction we use does not require any abstract
representation of the heap nodes (e.g. abstract shapes), but only 
reachability relations between the program variables.

The computation of the abstract transition relation is precise and
automatic but does not require the use of a theorem prover. Instead,
we use a small model theorem to identify a truncated (small)
finite-state version of the program whose abstraction is identical to
the abstraction of the unbounded-heap version of the same program. The 
abstraction of the finite-state version is then computed by {BDD} 
techniques.

For proving liveness properties, we augment the original system by a
well-founded ranking function, which is abstracted together with the
system. Well-foundedness is then abstracted into strong fairness
(compassion). We show that, for a limited class of programs which
still includes many interesting cases, the small model theorem can be
applied to this joint abstraction.

Independently of the application to shape-analysis examples, we
demonstrate the utility of the ranking abstraction method, and its
advantages over the direct use of ranking functions in a deductive
verification of the same property.},
  author = {Balaban, Ittai and Pnueli, Amir and Zuck, Lenore},
  booktitle = {Proceedings of the 6th International Conference on Verification, Model Checking, and Abstract Interpretation (VMCAI '05)},
  doi = {10.1007/978-3-540-30579-8_12},
  month = jan,
  title = {Shape Analysis by Predicate Abstraction},
  url = {http://cs.nyu.edu/acsys/pubs/shape-analysis-pred-abstraction.ps},
  year = {2005}
}
@phdthesis{BAL07,
  abstract = {The goal of shape analysis is to analyze properties of
programs that perform destructive updating on
dynamically allocated storage (heaps).
In the past, various frameworks have been proposed, the most notable
being the line of work based on shape graphs and canonical abstraction.
Further frameworks have been proposed since, among them ones based on
counter automata, predicate abstraction, and separation logic.
However, there has been little effort to deal with
liveness properties (e.g., termination) of systems whose verification depends
on deep heap properties.

This dissertation presents a new shape analysis framework based on
predicate abstraction, program augmentation, and
model checking. The combination of predicate abstraction and program
augmentation is collectively known as Ranking Abstraction and provides
a powerful model for verification of liveness properties of
sequential and concurrent systems.
A new predicate abstraction method is presented that allows for
automatic computation of abstract systems without relying on theorem
provers.
This approach has several intrinsic limitations, most notably on the class of
analyzable heap shapes. Thus, several extensions are described that allow for
complex heap shapes.},
  address = {New York, NY, U.S.A.},
  author = {Balaban, Ittai},
  month = may,
  school = {New York University},
  title = {Shape Analysis by Augmentation, Abstraction, and Transformation},
  url = {http://www.cs.nyu.edu/web/Research/Theses/balaban_ittai.pdf},
  year = {2007}
}
@inproceedings{BGZ03,
  abstract = {Translation validation is an approach for validating the output of
optimizing compilers.  Rather than verifying the compiler itself, translation
validation mandates that every run of the compiler generate a formal proof that
the produced target code is a correct implementation of the source code.
Speculative loop optimizations are aggressive optimizations which are only
correct under certain conditions which cannot be validated at compile time.
We propose using an automatic theorem prover together with the
translation validation framework to automatically generate run-time tests for
such speculative optimizations. This run-time validation approach must
not only detect the conditions under which an optimization generates incorrect
code, but also provide a way to recover from the optimization without aborting
the program or producing an incorrect result.  In this paper, we apply the
run-time validation technique to a class of speculative reordering
transformations and give some initial results of run-time tests generated by
the theorem prover {CVC}.},
  author = {Barrett, Clark and Goldberg, Benjamin and Zuck, Lenore},
  booktitle = {Proceedings of the 3rd International Workshop on Run-time Verification (RV '03)},
  doi = {10.1016/S1571-0661(04)81044-2},
  editor = {Sokolsky, Oleg and Viswanathan, Mahesh},
  month = jul,
  pages = {87--105},
  publisher = {Elsevier},
  series = {Electronic Notes in Theoretical Computer Science},
  title = {Run-Time Validation of Speculative Optimizations using {CVC}},
  url = {http://cs.nyu.edu/acsys/pubs/BGZ03.ps},
  volume = {89},
  number = {2},
  year = {2003}
}
@inproceedings{BCP06,
  abstract = {We present a method for model-checking of safety and liveness
properties over procedural programs, by combining state and
ranking abstractions with procedure summarization. Our
abstraction is an augmented finitary abstraction
\cite{KP00-icomp,BPZ05}, meaning that a concrete procedural program
is first augmented with a well-founded ranking function, and then
abstracted by a finitary state abstraction. This results in a
procedural abstract program with strong fairness requirements which
is then reduced to a finite-state fair discrete system (FDS)
using procedure summarization. This FDS is then model
checked for the property.},
  author = {Balaban, Ittai and Cohen, Ariel and Pnueli, Amir},
  booktitle = {Proceedings of the 7th International Conference on Verification, Model Checking, and Abstract Interpretation (VMCAI '06)},
  doi = {10.1007/11609773_18},
  title = {Ranking Abstraction of Recursive Programs},
  url = {http://cs.nyu.edu/acsys/pubs/ranking-abstraction-procedures.pdf},
  year = {2006}
}
@inproceedings{BPZranking05,
  abstract = {Predicate abstraction has become one of the most successful methodologies
for proving safety properties of programs. Recently, several abstraction
methodologies have been proposed for proving liveness properties. This paper
studies ranking abstraction where a program is augmented by a
nonconstraining progress monitor, and further abstracted by predicate
abstraction, to allow for automatic verification of progress
properties. Unlike most liveness methodologies, the augmentation does not 
require a complete ranking function that is expected to decrease with each 
step. Rather, the inputs are component rankings from which a complete 
ranking function may be formed.

The premise of the paper is an analogy between the methods of ranking 
abstraction and predicate abstraction, one ingredient of which is refinement: 
When predicate abstraction fails, one can refine it. When ranking abstraction 
fails, one must determine whether the predicate abstraction, or the ranking 
abstraction, need be refined. The paper presents strategies for determining 
which case is at hand.

The other part of the analogy is that of automatically deriving deductive proof 
constructs: Predicate abstraction is often used to derive program invariants for 
proving safety properties as a boolean combination of the given predicates. 
Deductive proof of progress properties requires well-founded ranking functions 
instead of invariants. We show how to obtain concrete global ranking functions 
from abstract programs.

We demonstrate the various methods on examples with nested loops, 
including a bubble sort algorithm on linked lists.},
  author = {Balaban, Ittai and Pnueli, Amir and Zuck, Lenore},
  booktitle = {Formal Techniques for Networked and Distributed Systems (FORTE '05)},
  doi = {10.1007/11562436_1},
  title = {Ranking Abstraction as Companion to Predicate Abstraction},
  url = {http://cs.nyu.edu/acsys/pubs/forte05-refinement.pdf},
  year = {2005}
}
@inproceedings{CCJR07,
  abstract = {Asynchronous system components are hard to write, hard to reason about, and (not coincidentally) hard to mechanically verify. In order to achieve high performance, asynchronous code is often written in an event-driven style that introduces non-sequential control flow and persistent heap data to track pending operations. As a result, existing sequential verification and static analysis tools are ineffective on event-driven code.
We describe {CLARITY}, a programming language that enables analyzable design of asynchronous components. {CLARITY} has three novel features: (1) Nonblocking function calls which allow event-driven code to be written in a sequential style. If a blocking statement is encountered during the execution of such a call, the call returns and the remainder of the operation is automatically queued for later execution.  (2) Coords, a set of high-level coordination primitives, which encapsulate common interactions between asynchronous components and make high-level coordination protocols explicit.  (3) Linearity annotations, which delegate coord protocol obligations to exactly one thread at each asynchronous function call, transforming a concurrent analysis problem into a sequential one.
 
We demonstrate how these language features enable both a more intuitive expression of program logic and more effective program analysis---most checking is done using simple sequential analysis. We describe our experience in developing a network device driver with {CLARITY}. We are able to mechanically verify several properties of the {CLARITY} driver that are beyond the reach of current analysis technology applied to equivalent C code.},
  address = {Dubrovnik, Croatia},
  author = {Chandrasekaran, Prakash and Conway, Christopher L. and Joy, Joseph M. and Rajamani, Sriram K.},
  booktitle = {Foundations of Software Engineering (FSE)},
  doi = {10.1145/1287624.1287636},
  month = sep,
  pages = {65--74},
  title = {Programming Asynchronous Layers with {CLARITY}},
  url = {http://cs.nyu.edu/acsys/pubs/CCJR07.pdf},
  year = {2007}
}
@techreport{CDNB08-TR,
  abstract = {It is well known that the use of points-to information can substantially improve the accuracy of a static program analysis.  Commonly used algorithms for computing points-to information are known to be sound only for memory-safe programs.  Thus, it appears problematic to utilize points-to information to verify the memory safety property without giving up soundness.  We show that a sound combination is possible, even if the points-to information is computed separately and only conditionally sound.  This result is based on a refined statement of the soundness conditions of points-to analyses and a general mechanism for composing conditionally sound analyses.},
  author = {Conway, Christopher L. and Dams, Dennis and Namjoshi, Kedar S. and Barrett, Clark},
  institution = {New York University, Dept. of Computer Science},
  number = {TR2008-910},
  title = {Points-to Analysis, Conditional Soundness, and Proving the Absence of Errors},
  url = {http://cs.nyu.edu/acsys/pubs/conway-2008-TR-pointer.pdf},
  year = {2008}
}
@inproceedings{CDNB08,
  abstract = {It is well known that the use of points-to information can substantially improve the accuracy of a static program analysis.  Commonly used algorithms for computing points-to information are known to be sound only for memory-safe programs. Thus, it appears problematic to utilize points-to information to verify the memory safety property without giving up soundness.  We show that a sound combination is possible, even if the points-to information is computed separately and only conditionally sound.  This result is based on a refined statement of the soundness conditions of points-to analyses and a general mechanism for composing conditionally sound analyses.},
  address = {Valencia, Spain},
  author = {Conway, Christopher L. and Dams, Dennis and Namjoshi, Kedar S. and Barrett, Clark},
  booktitle = {Proceedings of the 15th International Static Analysis Symposium (SAS '08)},
  doi = {10.1007/978-3-540-69166-2_5},
  month = jul,
  pages = {62--77},
  title = {Points-to Analysis, Conditional Soundness, and Proving the Absence of Errors},
  url = {http://cs.nyu.edu/acsys/pubs/conway-2008-SAS-pointer.pdf},
  year = {2008}
}
@inproceedings{CN08,
  abstract = {This paper develops a local reasoning method to check linear time temporal properties of concurrent programs. In practice, it is often infeasible to model check over the product state space of a concurrent program. The method developed in this paper replaces such global reasoning with checks of (abstracted) individual processes. An automatic refinement step gradually exposes local state if necessary, ensuring that the method is complete. Experiments with a prototype implementation show that local reasoning can hold a significant advantage over global reasoning.},
  author = {Cohen, Ariel and Namjoshi, Kedar S.},
  booktitle = {Computer Aided Verification, 20th International Conference, CAV 2008, Princeton, NJ, USA, July 2008, Proceedings},
  doi = {10.1007/978-3-540-70545-1_15},
  pages = {149--161},
  publisher = {Springer},
  series = {Lecture Notes in Computer Science},
  title = {Local Proofs for {Linear-Time} Properties of Concurrent Programs},
  url = {http://cs.nyu.edu/acsys/pubs/cav08sl.pdf},
  volume = {5123},
  year = {2008}
}
@inproceedings{CN07,
  abstract = {This paper explores the concept of locality in proofs of global safety
  properties of asynchronously composed, multi-process programs. Model
  checking on the full state space is often infeasible due to state explosion.
  A local proof, in contrast, is a collection of per-process invariants, which together imply the
  global safety property. 
  Local proofs can be compact, but a central problem is that local reasoning is
  incomplete. In this paper, we present a ``completion'' algorithm, which
  gradually exposes facts about the internal state of 
  components, until either a local proof or a real error is discovered.
  Experiments show that local reasoning can have significantly better
  performance than a reachability computation. Moreover, for some parameterized protocols,
  a local proof can be used to show correctness for all instances.},
  author = {Cohen, Ariel and Namjoshi, Kedar S.},
  booktitle = {Computer Aided Verification, 19th International Conference, CAV 2007, Berlin, Germany, July 3-7, 2007, Proceedings},
  pages = {55--67},
  publisher = {Springer},
  series = {Lecture Notes in Computer Science},
  title = {Local Proofs for Global Safety Properties},
  url = {http://cs.nyu.edu/acsys/pubs/local.pdf},
  volume = {4590},
  year = {2007}
}
@inproceedings{BPZ06distributed,
  abstract = {The method of ``Invisible Invariants'' has been applied successfully to
protocols that assume a ``symmetric'' underlying topology, be it cliques,
stars, or rings.  In this paper we show how the method can be applied to
proving safety properties of distributed protocols running under arbitrary
topologies.  Many safety properties of such protocols have reachability
predicates, which, at first glance, are beyond the scope of the Invisible
Invariants method.  To overcome this difficulty, we present a technique,
called ``coloring,'' that, in many instances, allows one to replace the
second-order reachability predicates with first-order predicates, resulting in
properties that are amenable to Invisible Invariants. We demonstrate our
techniques on several distributed protocols, including a variant of Luby's
Maximal Independent Set protocol, the Leader Election protocol used in the
{IEEE} 1394 (Firewire) distributed bus protocol, and various distributed
spanning tree algorithms.
All examples have been tested using the symbolic model checker {TLV}.},
  author = {Balaban, Ittai and Pnueli, Amir and Zuck, Lenore D.},
  booktitle = {Automata, Languages and Programming, 33rd International Colloquium (ICALP '06), Part II},
  doi = {10.1007/11787006_45},
  pages = {528--539},
  publisher = {Springer},
  series = {Lecture Notes in Computer Science},
  title = {Invisible Safety of Distributed Protocols},
  url = {http://cs.nyu.edu/acsys/pubs/dist-protocols.pdf},
  volume = {4052},
  year = {2006}
}
@inproceedings{GZB05,
  abstract = {Translation Validation is a technique for ensuring that the target code produced by a translator is a correct translation of the source code. Rather than verifying the translator itself, translation validation validates the correctness of each translation, generating a formal proof that it is indeed correct. Recently, translation validation has been applied to prove the correctness of compilation in general, and optimizations in particular.

Tvoc, a tool for the Translation Validation of Optimizing Compilers developed by the authors and their colleagues, successfully handles many optimizations employed by Intel's {ORC} compiler. Tvoc, however, is somewhat limited when dealing with loop reordering transformations. First, in the theory upon which it is based, separate proof rules are needed for different categories of loop reordering transformations. Second, Tvoc has difficulties dealing with combinations of optimizations that are performed on the same block of code. Finally, Tvoc relies on information, provided by the compiler, indicating which optimizations have been performed (in the case of the current {ORC}, this instrumentation is fortunately part of the compiler).

This paper addresses all the issues above. It presents a uniform proof rule that encompasses all reordering transformations performed by the Intel {ORC} compiler, describes a methodology for translation validation in the presence of combinations of optimizations, and presents heuristics for determining which optimizations occurred (rather than relying on the compiler for this information).},
  author = {Goldberg, Benjamin and Zuck, Lenore and Barrett, Clark},
  booktitle = {Proceedings of the 3rd International Workshop on Compiler Optimization meets Compiler Verification (COCV '04)},
  doi = {10.1016/j.entcs.2005.01.030},
  editor = {Knoop, J. and Necula, G. C. and Zimmermann, W.},
  month = may,
  pages = {53--71},
  posted-at = {2011-02-08 17:43:36},
  priority = {2},
  publisher = {Elsevier},
  series = {Electronic Notes in Theoretical Computer Science},
  title = {Into the Loops: Practical Issues in Translation Validation for Optimizing Compilers},
  url = {http://cs.nyu.edu/acsys/pubs/GZB04.ps},
  volume = {132(1)},
  year = {2005}
}
@inproceedings{BFPZ05,
  abstract = {This paper describes the Invisible Invariant Verifier (IIV) --- an automatic tool 
for the generation of inductive invariants, based on the work in 
\cite{PRZ01,APRXZ01,FPPZ04,ZP04}.
The inputs to IIV are a parameterized system and an invariance
property p, and the output of IIV is ``success'' if it finds an inductive 
invariant that strengthens p and ``fail'' otherwise.},
  author = {Balaban, Ittai and Fang, Yi and Pnueli, Amir and Zuck, Lenore},
  booktitle = {Computer Aided Verification (CAV) 2005},
  doi = {10.1007/11513988_39},
  posted-at = {2011-02-08 17:43:31},
  priority = {2},
  title = {{IIV}: An Invisible Invariant Verifier},
  url = {http://cs.nyu.edu/acsys/pubs/iiv-cav05.pdf},
  year = {2005}
}
@inproceedings{PGZ05,
  abstract = {Live Sequence Charts ({LSCs}) provide a formal visual framework
for creating scenario-based requirements for reactive systems.
An {LSC} imposes a partial order over a set of events, such as the
sending or receiving of messages.  Each event is associated with
a liveness property, indicating whether it can or must occur. 
Time extensions can further specify when these events should
occur. An individual {LSC} tells a story about a particular fragment of
system behavior, whereas {LSC} specifications---a finite set of
{LSCs}---collectively define the total behavior.   An {LSC}
specification may require that more than one distinct scenario, or
multiple instances of the same scenario, execute concurrently.  It
is natural  to ask whether {LSC} specifications can be synthesized
into formal languages. Previous work offers translations from
untimed {LSCs} to finite state machines, and from single
(non-concurrent) {LSCs} to timed automata. Here, we show that {LSC}
specifications with concurrency can also be expressed as timed
automata.},
  author = {Plock, Cory and Goldberg, Benjamin and Zuck, Lenore},
  booktitle = {Proc. 12th Annual IEEE Intl. Conference and Workshop on the Engineering of Computer-Based Systems},
  doi = {10.1109/ECBS.2005.41},
  posted-at = {2011-02-08 17:43:21},
  priority = {2},
  publisher = {IEEE Computer Society Press},
  title = {From Requirements to Specifications},
  url = {http://cs.nyu.edu/acsys/pubs/main.ps},
  year = {2005}
}
@inproceedings{CPZ08ec2,
  author = {Cohen, Ariel and Pnueli, Amir and Zuck, Lenore D.},
  booktitle = {Exploiting Concurrency Efficiently and Correctly (EC2)},
  posted-at = {2011-02-08 17:43:14},
  priority = {2},
  title = {Formal Verification of Transactional Memories},
  url = {http://cs.nyu.edu/acsys/pubs/ec2_08.pdf},
  year = {2008}
}
@article{BdMS05-JAR,
  abstract = {The Satisfiability Modulo Theories Competition ({SMT}-{COMP}) is intended to spark further advances in the decision procedures field, especially for applications in hardware and software verification.  Public competitions are a well-known means of stimulating advancement in automated reasoning.  Evaluation of {SMT} solvers entered in {SMT}-{COMP} took place while {CAV} 2005 was meeting. Twelve solvers were entered, and 1352 benchmarks were collected in seven different divisions.},
  author = {Barrett, Clark and de Moura, Leonardo and Stump, Aaron},
  doi = {10.1007/s10817-006-9026-1},
  journal = {Journal of Automated Reasoning},
  month = nov,
  number = {4},
  pages = {373--390},
  posted-at = {2011-02-08 17:43:04},
  priority = {2},
  publisher = {Springer Netherlands},
  title = {Design and Results of the First Satisfiability Modulo Theories Competition ({SMT-COMP} 2005)},
  url = {http://cs.nyu.edu/acsys/pubs/BdMS061.ps},
  volume = {35},
  year = {2005}
}
@inproceedings{BB04,
  abstract = {We describe a tool called {CVC} Lite ({CVCL}), an automated theorem prover for formulas in a union of first-order theories. {CVCL} supports a set of theories which are useful in verification, including uninterpreted functions, arrays, records and tuples, and linear arithmetic. New features in {CVCL} (beyond those provided in similar previous systems) include a library {API}, more support for producing proofs, some heuristics for reasoning about quantifiers, and support for symbolic simulation primitives.},
  author = {Barrett, Clark and Berezin, Sergey},
  booktitle = {Proceedings of the 16th International Conference on Computer Aided Verification (CAV '04)},
  doi = {10.1007/978-3-540-27813-9_49},
  editor = {Alur, Rajeev and Peled, Doron A.},
  month = jul,
  pages = {515--518},
  posted-at = {2011-02-08 17:42:39},
  priority = {2},
  publisher = {Springer-Verlag},
  series = {Lecture Notes in Computer Science},
  title = {{CVC L}ite: A New Implementation of the Cooperating Validity Checker},
  url = {http://cs.nyu.edu/acsys/pubs/BB04.ps},
  volume = {3114},
  year = {2004}
}
@inproceedings{BT07,
  abstract = {{CVC3}, a joint project of {NYU} and U Iowa, is the new and latest version of the Cooperating Validity Checker.  {CVC3} extends and builds on the functionality of its predecessors and includes many new features such as support for additional theories, an abstract architecture for Boolean reasoning, and {SMT}-{LIB} compliance.  We describe the system and discuss some applications and continuing work.},
  author = {Barrett, Clark and Tinelli, Cesare},
  booktitle = {Proceedings of the 19th International Conference on Computer Aided Verification (CAV '07)},
  doi = {10.1007/978-3-540-73368-3_34},
  editor = {Damm, Werner and Hermanns, Holger},
  month = jul,
  pages = {298--302},
  posted-at = {2011-02-08 17:42:35},
  priority = {2},
  publisher = {Springer-Verlag},
  series = {Lecture Notes in Computer Science},
  title = {{CVC3}},
  url = {http://cs.nyu.edu/acsys/pubs/BT07.ps},
  volume = {4590},
  year = {2007}
}
@inproceedings{MBG06,
  abstract = {This paper is a case study in combining theorem provers. We define a derived rule in {HOL}-Light, {CVC\_PROVE}, which calls {CVC} Lite and translates the resulting proof object back to {HOL}-Light. As a result, we obtain a highly trusted proof-checker for {CVC} Lite, while also fundamentally expanding the capabilities of {HOL}-Light.},
  author = {McLaughlin, Sean and Barrett, Clark and Ge, Yeting},
  booktitle = {Proceedings of the 3rd Workshop on Pragmatics of Decision Procedures in Automated Reasoning (PDPAR '05)},
  doi = {10.1016/j.entcs.2005.12.005},
  editor = {Armando, Alessandro and Cimatti, Alessandro},
  month = jan,
  pages = {43--51},
  posted-at = {2011-02-08 17:42:29},
  priority = {2},
  publisher = {Elsevier},
  series = {Electronic Notes in Theoretical Computer Science},
  title = {Cooperating Theorem Provers: A Case Study Combining {HOL-Light} and {CVC L}ite},
  url = {http://cs.nyu.edu/acsys/pubs/MBG06.ps},
  volume = {144(2)},
  year = {2006}
}
@techreport{ZP07,
  abstract = {The paper presents a framework for proving program equivalence and its application 
to verification of transformations performed by optimizing compilers. To leverage existing 
program analysis techniques, we convert the equivalence checking problem to analysis of one 
system --- a cross-product of the two input programs. Further, we show how it can be applied
in a setting where the compiler is treated as a black box and no compiler annotations are
available. Finally, we report on the prototype tool {CoVaC} that applies the developed
methodology to verification of the {LLVM} compiler. {CoVaC} handles many of the classical 
intraprocedural compiler optimizations such as constant folding, reassociation, common 
subexpression elimination, code motion, dead code elimination, branch optimizations, 
and others.},
  author = {Zaks, Anna and Pnueli, Amir},
  posted-at = {2011-02-08 17:42:23},
  priority = {2},
  title = {Compiler Validation by Program Analysis of the {Cross-Product}},
  url = {http://cs.nyu.edu/acsys/pubs/covacTR2.pdf},
  year = {2007}
}
@inproceedings{BD05,
  abstract = {A decision procedure for arbitrary first-order formulas can be viewed as combining a propositional search with a decision procedure for conjunctions of first-order literals, so Boolean {SAT} methods can be used for the propositional search in order to improve the performance of the overall decision procedure. We show how to combine some Boolean {SAT} methods with non-clausal heuristics developed for first-order decision procedures. The combination of methods leads to a smaller number of decisions than either method alone.},
  author = {Barrett, Clark and Donham, Jacob},
  booktitle = {Selected Papers from the Workshops on Disproving and the Second International Workshop on Pragmatics of Decision Procedures (PDPAR '04)},
  doi = {10.1016/j.entcs.2004.09.042},
  editor = {Ahrendt, Wolfgang and Baumgartner, Peter and de Nivelle, Hans and Ranise, Silvio and Tinelli, Cesare},
  month = jul,
  pages = {3--12},
  posted-at = {2011-02-08 17:42:18},
  priority = {2},
  publisher = {Elsevier},
  series = {Electronic Notes in Theoretical Computer Science},
  title = {Combining {SAT} Methods with Non-Clausal Decision Heuristics},
  url = {http://cs.nyu.edu/acsys/pubs/BD041.ps},
  volume = {125(3)},
  year = {2005}
}
@inproceedings{SB06,
  abstract = {We present a tool, called {CASCADE}, to check assertions in C programs as part of a multi-stage verification strategy. {CASCADE} takes as input a C program and a control file (the output of an earlier stage) that specifies one or more assertions to be checked together with (optionally) some restrictions on program behaviors. For each assertion, {CASCADE} produces either a concrete trace violating the assertion or a deduction (proof) that the assertion cannot be violated.},
  author = {Sethi, Nikhil and Barrett, Clark},
  booktitle = {Proceedings of the 18th International Conference on Computer Aided Verification (CAV '06)},
  doi = {10.1007/11817963_17},
  editor = {Ball, Thomas and Jones, Robert B.},
  month = aug,
  pages = {166--169},
  posted-at = {2011-02-08 17:42:10},
  priority = {2},
  publisher = {Springer-Verlag},
  series = {Lecture Notes in Computer Science},
  title = {{CASCADE}: C Assertion Checker and Deductive Engine},
  url = {http://cs.nyu.edu/acsys/pubs/SB06.ps},
  volume = {4144},
  year = {2006}
}
@inproceedings{BB03,
  abstract = {We present a proof-producing search engine for solving the Boolean
satisfiability problem.  We show how the proof-producing infrastructure can be
used to track the dependency information needed to implement important
optimizations found in modern {SAT} solvers.  We also describe how the
same search engine can be extended to work with decision procedures for
quantifier-free first-order logic.  Initial results indicate that it is
possible to extend a state-of-the-art {SAT} solver with proof production in a way
that both preserves the algorithmic performance (e.g., the number of decisions
to solve a problem) and does not incur unreasonable overhead for the proofs.},
  author = {Barrett, Clark and Berezin, Sergey},
  booktitle = {Proceedings of the 1st International Workshop on Pragmatics of Decision Procedures in Automated Reasoning (PDPAR '03)},
  month = jul,
  posted-at = {2011-02-08 17:40:11},
  priority = {2},
  title = {A {Proof-Producing} Boolean Search Engine},
  url = {http://cs.nyu.edu/acsys/pubs/BB03.ps},
  year = {2003}
}
@inproceedings{BBS+05,
  abstract = {Most verification approaches assume a mathematical formalism in which functions are total, even though partial functions occur naturally in many applications. Furthermore, although there have been various proposals for logics of partial functions, there is no consensus on which is ``the right'' logic to use for verification applications. In this paper, we propose using a three-valued Kleene logic, where partial functions return the ``undefined'' value when applied outside of their domains. The particular semantics are chosen according to the principle of least surprise to the user; if there is disagreement among the various approaches on what the value of the formula should be, its evaluation is undefined. We show that the problem of checking validity in the three-valued logic can be reduced to checking validity in a standard two-valued logic, and describe how this approach has been successfully implemented in our tool, {CVC} Lite.},
  author = {Berezin, Sergey and Barrett, Clark and Shikanian, Igor and Chechik, Marsha and Gurfinkel, Arie and Dill, David L.},
  booktitle = {Selected Papers from the Workshops on Disproving and the Second International Workshop on Pragmatics of Decision Procedures (PDPAR '04)},
  doi = {10.1016/j.entcs.2004.06.064},
  editor = {Ahrendt, Wolfgang and Baumgartner, Peter and de Nivelle, Hans and Ranise, Silvio and Tinelli, Cesare},
  month = jul,
  pages = {13--23},
  posted-at = {2011-02-08 17:40:05},
  priority = {2},
  publisher = {Elsevier},
  series = {Electronic Notes in Theoretical Computer Science},
  title = {A Practical Approach to Partial Functions in {CVC L}ite},
  url = {http://cs.nyu.edu/acsys/pubs/BB+04.ps},
  volume = {125(3)},
  year = {2005}
}
@article{SJO+05,
  abstract = {The Forte formal verification environment for datapath-dominated hardware is described. Forte has proven to be effective in large-scale industrial trials and combines an efficient linear-time logic model-checking algorithm, namely symbolic trajectory evaluation ({STE}), with lightweight theorem proving in higher-order logic. These are tightly integrated in a general-purpose functional programming language, which both allows the system to be easily customized and at the same time serves as a specification language. The design philosophy behind Forte is presented and the elements of the verification methodology that make it effective in practice are also described.},
  author = {Seger, Carl-Johan H. and Jones, Robert B. and O'Leary, John W. and Melham, Tom and Aagaard, Mark D. and Barrett, Clark and Syme, Don},
  doi = {10.1109/TCAD.2005.850814},
  journal = {IEEE Transactions on Computer-Aided Design of Integrated Circuits and Systems},
  month = sep,
  number = {9},
  pages = {1381--1405},
  posted-at = {2011-02-08 17:39:59},
  priority = {2},
  title = {An Industrially Effective Environment for Formal Hardware Verification},
  url = {http://cs.nyu.edu/acsys/pubs/SJO+05.pdf},
  volume = {24},
  year = {2005}
}
@inproceedings{BST07-PDPAR,
  abstract = {The theory of recursive data types is a valuable modeling tool for software verification. In the past, decision procedures have been proposed for both the full theory and its universal fragment. However, previous work has been limited in various ways. In this paper, we present a general algorithm for the universal fragment. The algorithm is presented declaratively as a set of abstract rules which are terminating, sound, and complete. We show how other algorithms can be realized as strategies within our general framework. Finally, we propose a new strategy and give experimental results showing that it performs well in practice.},
  author = {Barrett, Clark and Shikanian, Igor and Tinelli, Cesare},
  booktitle = {Combined Proceedings of the 4th Workshop on Pragmatics of Decision Procedures in Automated Reasoning (PDPAR '06) and the 1st International Workshop on Probabilistic Automata and Logics (PaUL '06)},
  doi = {10.1016/j.entcs.2006.11.037},
  editor = {Cook, Byron and Sebastiani, Roberto},
  month = jun,
  pages = {23--37},
  posted-at = {2011-02-08 17:39:50},
  priority = {2},
  publisher = {Elsevier},
  series = {Electronic Notes in Theoretical Computer Science},
  title = {An Abstract Decision Procedure for Satisfiability in the Theory of Recursive Data Types},
  url = {http://cs.nyu.edu/acsys/pubs/BST06.ps},
  volume = {174(8)},
  year = {2007}
}
@techreport{BST05,
  abstract = {The theory of recursive data types is a valuable modeling tool for software verification. In the past, decision procedures have been proposed for both the full
theory and its universal fragment. However, previous work has been limited in various ways, including an inability to deal with multiple constructors, multi-sorted logic, and mutually recursive data types. More significantly, previous algorithms for the universal case have been based on inefficient nondeterministic guesses and have been described in fairly complex procedural terms. We present an algorithm which addresses these issues for the universal theory. The algorithm is presented declaratively as a set of abstract rules which are terminating, sound, and complete. We also describe strategies for applying the rules and explain why our recommended strategy is more efficient than those used by previous algorithms. Finally, we discuss how the algorithm can be used within a broader framework of cooperating decision procedures.},
  author = {Barrett, Clark and Shikanian, Igor and Tinelli, Cesare},
  institution = {Department of Computer Science, New York University},
  month = nov,
  number = {TR2005-878},
  posted-at = {2011-02-08 17:39:44},
  priority = {2},
  title = {An Abstract Decision Procedure for Satisfiability in the Theory of Recursive Data Types},
  url = {http://cs.nyu.edu/acsys/pubs/BST051.ps},
  year = {2005}
}
@article{BST07-JAR,
  abstract = {Inductive data types are a valuable modeling tool for software verification.  In the past, decision procedures have been proposed for various theories of inductive data types, some focused on the universal fragment, and some focused on handling arbitrary quantifiers.  Because of the complexity of the full theory, previous work on the full theory has not focused on strategies for practical implementation.  However, even for the universal fragment, previous work has been limited in several significant ways.  In this paper, we present a general and practical algorithm for the universal fragment.  The algorithm is presented declaratively as a set of abstract rules which we show to be terminating, sound, and complete.  We show how other algorithms can be realized as strategies within our general framework, and we propose a new strategy and give experimental results indicating that it performs well in practice.  We conclude with a discussion of several useful ways the algorithm can be  extended.},
  author = {Barrett, Clark and Shikanian, Igor and Tinelli, Cesare},
  journal = {Journal on Satisfiability, Boolean Modeling and Computation},
  pages = {21--46},
  posted-at = {2011-02-08 17:39:37},
  priority = {2},
  publisher = {IOS Press},
  title = {An Abstract Decision Procedure for a Theory of Inductive Data Types},
  url = {http://cs.nyu.edu/acsys/pubs/BST07.ps},
  volume = {3},
  year = {2007}
}
@techreport{PS07,
  abstract = {The paper presents a new deductive rule for verifying response properties
under the assumption of compassion (strong fairness) requirements. It
improves on previous rules in that the premises of the new rule are all first
order. We prove that the rule is sound, and present a constructive completeness
proof for the case of finite-state systems. For the general case, we present a
sketch of a relative completeness proof. We report on the implementation of
the rule in {PVS} and illustrate its application on some simple but non-trivial
examples.},
  author = {Pnueli, Amir and Sa'ar, Yaniv},
  institution = {Department of Computer Science, New York University},
  month = oct,
  posted-at = {2011-02-08 17:39:31},
  priority = {2},
  title = {All You Need is Compassion ({TR})},
  url = {http://cs.nyu.edu/acsys/pubs/compassion-report-07.pdf},
  year = {2007}
}
@inproceedings{CPZ,
  abstract = {Transactional memory is a programming abstraction intended to simplify the synchronization of conflicting memory accesses (by concurrent threads) without the difficulties associated with locks. In previous work we presented a formal framework for proving that a transactional memory implementation satisfies its specifications, and provided model checking verification of some implementations using small instantiations. This paper extends the previous work to capture non-transactional accesses to memory, which occur, for example, when using legacy code.  We provide a mechanical proof of the soundness of the verification method, as well as a mechanical verification of a version of the popular {TCC} implementation that includes non-transactional memory accesses. The verification is performed by the deductive temporal checker {TLPVS}.},
  author = {Cohen, Ariel and Pnueli, Amir and Zuck, Lenore D.},
  booktitle = {Computer Aided Verification, 20th International Conference, CAV 2008, Princeton, NJ, USA, July 2008, Proceedings},
  doi = {10.1007/978-3-540-70545-1_13},
  pages = {121--134},
  posted-at = {2011-02-08 17:39:02},
  priority = {2},
  publisher = {Springer},
  series = {Lecture Notes in Computer Science},
  title = {Mechanical Verification of Transactional Memories with Non-Transactional Memory Accesses},
  url = {http://cs.nyu.edu/acsys/pubs/cav08tm.pdf},
  volume = {5123},
  year = {2008}
}
@article{BPZ07ranking,
  abstract = {We define the class of single-parent heap systems, which rely on a singly-linked heap in order to model destructive updates on tree structures.  This encoding has the advantage of relying on a relatively simple theory of linked lists in order to support abstraction computation. To facilitate the application of this encoding, we provide a program transformation that, given a program operating on a multi-linked heap without sharing, transforms it into one over a single-parent heap. It is then possible to apply shape analysis by predicate and ranking abstraction as in
\cite{BPZ05}. The technique has been successfully applied to examples with trees of fixed arity (balancing of and insertion into a binary sort tree).},
  author = {Balaban, Ittai and Pnueli, Amir and Zuck, Lenore},
  doi = {10.1142/S0129054107004553},
  journal = {Int. J. Found. Comput. Sci.},
  number = {1},
  pages = {5--44},
  posted-at = {2011-02-08 17:38:53},
  priority = {2},
  title = {Modular Ranking Abstraction},
  url = {http://cs.nyu.edu/acsys/pubs/modular-ranking-abstraction.pdf},
  volume = {18},
  year = {2007}
}
@inproceedings{PZZ06,
  abstract = {We consider the problem of a module interacting with an external interface (environment),
where the interaction is expected to satisfy some system specification \Phi.
While we have the full implementation details of the module, we are only given 
a partial external specification for the interface. The interface specification being 
partial (incomplete) means that the interface displays only a strict subset of the 
behaviors allowed by the interface specification.

Based on the assumption that interface specifications are typically incomplete, 
we address the question of whether we can tighten the interface specification  
into a strategy, consistent with the given partial specification, that will guarantee 
that all possible interactions resulting from possible behaviors of the module 
will satisfy the system specification \Phi. We refer to such a tighter specification
as a \Phi-guaranteeing specification. Rather than verifying whether
the interface, which is often an off-the-shelf component, satisfies the tighter specification, 
the paper proposes a construction of a run-time monitor which continuously checks 
the existence of a \Phi-guaranteeing interface.

We view the module and the external interface as players in a 2-player game. 
The interface has a winning strategy if it can guarantee that no matter what the module does,
the overall specification \Phi is met. The problem of incomplete specifications 
is resolved by allowing the interface to follow any strategy consistent with the 
interface specification. Our approach essentially combines traditional 
run-time monitoring and static analysis. This allows going beyond the focus of
traditional run-time monitoring tools --- error detection in the execution trace ---
toward the focus of static analysis --- bug detection in programs.},
  author = {Pnueli, Amir and Zaks, Aleksandr and Zuck, Lenore},
  booktitle = {Proceedings of the 5th Workshop on Runtime Verification (RV 2005)},
  doi = {10.1016/j.entcs.2006.02.005},
  month = may,
  pages = {73--89},
  posted-at = {2011-02-08 17:38:42},
  priority = {2},
  series = {Electronic Notes in Theoretical Computer Science},
  title = {Monitoring Interfaces for Faults},
  url = {http://cs.nyu.edu/acsys/pubs/pnuelizakszuck.pdf},
  volume = {144},
  year = {2006}
}
@inproceedings{CHK05,
  abstract = {We apply the language of live sequence charts ({LSCs}) and the {Play-Engine}
tool to a real-world complex telecommunication service. The service, called 
Depannage, allows a user to make a phone call and ask for help from a doctor, 
the fire brigade, a car maintenance service, etc. This kind of service is built on 
top of an embedded platform, using both new and existing service 
components. The complexity of such applications stems from their distributed 
architecture, the various time constraints they entail, and the fact that the
underlying systems are rapidly evolving, introducing new components,
protocols and associated hardware constraints, all of which must be taken into 
account. We present the results of our work on the specification, animation 
and formal verification of the Depannage service, and draw some initial 
conclusions as to an appropriate methodology for using a scenario-based 
approach in the telecommunication domain. The complete specification of the 
Depannage application in {LSCs} and some animations showing simulation and 
verification results are made available as supplementary material.},
  author = {Combes, Pierre and Harel, David and Kugler, Hillel},
  booktitle = {Proc. 3rd Int. Symp. on Automated Technology for Verification and Analysis (ATVA '05)},
  doi = {10.1007/s10270-007-0069-5},
  pages = {414--428},
  posted-at = {2011-02-08 17:35:31},
  priority = {2},
  publisher = {Springer},
  series = {Lecture Notes in Computer Science},
  title = {Modeling and Verification of a Telecommunication Application using Live Sequence Charts and the {Play-Engine} Tool},
  url = {http://cs.nyu.edu/acsys/pubs/ATVA05.pdf},
  volume = {3707},
  year = {2005}
}