% Generated on 2002-09-13 14:14:29 +0200
%---------------------------------------
% _____ __
% \___ \ / / FFFFFF LL CCCC '' 0000 2222
% ___\ \__/ /_ FF LL oooo CC CC '' 00 00 22 22
% \___ __ _ `. FFFFF LL oo oo CC ' 00 00 22
% ___\ \/ /_) ) FF LL oo oo CC CC 00 00 22
% \__________,' FF LLLLLL oooo CCCC 0000 222222
%
% The 2002 Federated Logic Conference
%
% Copenhagen, Denmark, July 20 - August 1, 2002
%
% This BibTeX database of papers presented at FLoC'02 is compiled
% from data submitted by the organizers of the various submeetings of
% FLoC. The accuracy of the data with respect to the actual proceedings
% may vary; however:
%
% * The authors and titles closely follow what appears in the program
% booklet handed out to FLoC participants (any differences are
% due to corrections that reached the webmaster after the booklet
% went to press).
%    In a few cases, the author names given here are slightly different
%    from the ones reported to me by the meeting organizers. The changes
%    aim at identifying the author more accurately, for example by
%    expanding initials and inserting middle initials and accents that
%    were omitted by my sources.
%
% * The abstracts are the same as appear on the FLoC website. Some
% submeeting organizers have exhibited a suspicious absence of
% updates to the abstract data as their final-papers deadline
% rolled by; so the abstracts may not match the abstracts in the
% proceedings word by word.
%
% Only articles for which the submeeting organizer has submitted
% page numbers within the proceedings are included in the database.
%
% Henning Makholm
% FLoC webmaster
%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CADE - Automated Deduction -- {CADE-18}
% ================================
@InProceedings{Horrocks:CADE:2002,
  author   = {Ian Horrocks},
  title    = {Reasoning with Expressive Description Logics: Theory
              and Practice},
  type     = {Invited talk},
  crossref = {CADE:2002},
  pages    = {1--15},
  abstract = {Description Logics are a family of class based
              knowledge representation formalisms characterised by
              the use of various constructors to build complex
              classes from simpler ones, and by an emphasis on the
              provision of sound, complete and (empirically)
              tractable reasoning services. They have a wide range of
              applications, but their use as ontology languages has
              been highlighted by the recent explosion of interest in
              the ``Semantic Web'', where ontologies are set to play
              a key role. DAML+OIL is a description logic based
              ontology language specifically designed for use on the
              web. The logical basis of the language means that
              reasoning services can be provided, both to support
              ontology design and to make DAML+OIL described web
              resources more accessible to automated processes.},
}
@InProceedings{10:CADE:2002,
  author   = {Guoqiang Pan and Ulrike Sattler and Moshe Y. Vardi},
  title    = {{BDD}-Based Decision Procedures for {K}},
  crossref = {CADE:2002},
  pages    = {16--30},
  abstract = {We describe BDD-based decision procedures for K. Our
              approach is inspired by the automata-theoretic
              approach, but we avoid explicit automata construction.
              Our algorithms compute the fixpoint of a set of types,
              which are sets of formulas satisfying some consistency
              conditions. We use BDDs to represent and manipulate
              such sets. Experimental results show that our
              algorithms are competitive with contemporary methods
              using benchmarks from TANCS 98 and TANCS 2000.},
}
@InProceedings{53:CADE:2002,
  author   = {Andrew Bernard and Peter Lee},
  title    = {Temporal Logic for Proof-Carrying Code},
  crossref = {CADE:2002},
  pages    = {31--46},
  abstract = {\emph{Proof-carrying code} (PCC) is a framework for
              ensuring that untrusted programs are safe to install
              and execute. When using PCC, untrusted programs are
              required to contain a proof that allows the program
              text to be checked efficiently for safe behavior. In
              this paper, we lay the foundation for a potential
              engineering improvement to PCC. Specifically, we
              present a practical approach to using temporal logic to
              specify security policies in such a way that a PCC
              system can enforce them.},
}
@InProceedings{60:CADE:2002,
  author   = {George C. Necula and Robert R. Schneck},
  title    = {A Gradual Approach to a More Trustworthy, yet
              Scalable, Proof-Carrying Code},
  crossref = {CADE:2002},
  pages    = {47--62},
  abstract = {Proof-carrying code~(PCC) allows a code producer to
              associate to a program a machine-checkable proof of its
              safety. In the original approach to PCC, the safety
              policy includes proof rules which determine how various
              actions are to be proved safe. These proof rules have
              been considered part of the trusted code base~(TCB) of
              the PCC system. We wish to remove the proof rules from
              the TCB by providing a formal proof of their soundness.
              This makes the PCC system more secure, by reducing the
              TCB; it also makes the system more flexible, by
              allowing code producers to provide their own
              safety-policy proof rules, if they can guarantee their
              soundness. Furthermore this security and flexibility
              are gained without any loss in the ability to handle
              large programs. In this paper we discuss how to produce
              the necessary formal soundness theorem given a safety
              policy. As an application of the framework, we have
              used the Coq system to prove the soundness of the proof
              rules for a type-based safety policy for native machine
              code compiled from Java.},
}
@InProceedings{50:CADE:2002,
  author   = {Martin Strecker},
  title    = {Formal Verification of a {Java} Compiler in {Isabelle}},
  crossref = {CADE:2002},
  pages    = {63--77},
  abstract = {This paper reports on the formal proof of correctness
              of a compiler from a substantial subset of Java source
              language to Java bytecode in the proof environment
              Isabelle. This work is based on extensive previous
              formalizations of Java, which comprise all relevant
              features of object-orientation. We place particular
              emphasis on describing the effects of design decisions
              in these formalizations on the compiler correctness
              proof.},
}
@InProceedings{63:CADE:2002,
  author   = {Uwe Egly},
  title    = {Embedding Lax Logic into Intuitionistic Logic},
  crossref = {CADE:2002},
  pages    = {78--93},
  abstract = {Lax logic is obtained from intuitionistic logic by
              adding a single modality $\circ$\/ which captures
              properties of necessity and possibility. This modality
              was considered by Curry in two papers from 1952 and
              1957 and rediscovered recently in different contexts
              like verification of circuits and the computational
              $\lambda$-calculus. We show that lax logic can be
              faithfully embedded into the underlying intuitionistic
              logic and discuss (computational) properties of the
              embedding. Using the proposed polynomial-time
              computable embedding, PSPACE-completeness of the
              provability problem of propositional lax logic is
              shown.},
}
@InProceedings{48:CADE:2002,
  author   = {Dominique Larchey-Wendling},
  title    = {Combining proof-search and counter-model construction
              for deciding {G{\"o}del-Dummett} logic},
  crossref = {CADE:2002},
  pages    = {94--110},
  abstract = {We present an algorithm for deciding G{\"o}del-Dummett
              logic. The originality of this algorithm comes from the
              combination of proof-search in sequent calculus, which
              reduces a sequent to a set of pseudo-atomic sequents,
              and counter-model construction of such pseudo-atomic
              sequents by a fixpoint computation. From an analysis of
              this construction, we deduce a new logical rule which
              provides shorter proofs than the corresponding rule of
              G4-LC. We also present a linear implementation of the
              counter-model generation algorithm for pseudo-atomic
              sequents.},
}
@InProceedings{51:CADE:2002,
  author   = {Didier Galmiche and Daniel Mery},
  title    = {Connection-based proof search in propositional {BI}
              logic},
  crossref = {CADE:2002},
  pages    = {111--128},
  abstract = {We present a connection-based characterization of
              propositional BI (logic of bunched implications), a
              logic combining linear and intuitionistic connectives.
              This logic, with its sharing interpretation, has been
              recently used to reason about mutable data structures
              and needs proof search methods. Our connection-based
              characterization for BI, is based on standard notions
              but involves, in a specific way, labels and constraints
              in order to capture the interactions between
              connectives during the proof-search. As BI is
              conservative w.r.t.\ intuitionistic logic and
              multiplicative intuitionistic linear logic, we deduce,
              by some restrictions, new connection-based
              characterizations and methods for both logics.},
}
@InProceedings{8:CADE:2002,
  author   = {Jesper M{\o}ller},
  title    = {{DDDLIB}: {A} Library For Solving Quantified
              Difference Inequalities},
  type     = {system description},
  crossref = {CADE:2002},
  pages    = {129--133},
  abstract = {DDDLIB is a library for manipulating formulae in a
              first-order logic over Boolean variables and
              inequalities of the form $x_1-x_2\le d$, where
              $x_1,x_2$ are real variables and $d$ is an integer
              constant. Formulae are represented in a semi-canonical
              data structure called difference decision diagrams
              (DDDs) which provide efficient algorithms for
              constructing formulae with the standard Boolean
              operators (conjunction, disjunction, negation, etc.),
              eliminating quantifiers, and deciding functional
              properties (satisfiability, validity and equivalence).
              The library is written in C and has interfaces for C++,
              Standard ML and Objective Caml.},
}
@InProceedings{57:CADE:2002,
  author   = {Joe Hurd},
  title    = {An {LCF}-Style Interface between {HOL} and First-Order
              Logic},
  type     = {system description},
  crossref = {CADE:2002},
  pages    = {134--138},
  abstract = {Performing interactive proof in the HOL theorem
              prover involves reducing goals to simpler subgoals. It
              turns out that many of these subgoals can be
              efficiently `finished off' by an automatic first-order
              prover. Given this level of demand for automatic
              first-order proof by users performing interactive proof
              in HOL, it seems worthwhile to look for ways to
              narrow the gap between these two worlds.},
}
@InProceedings{34:CADE:2002,
  author   = {Michael Kohlhase and J{\"u}rgen Zimmer},
  title    = {System Description: The {MathWeb} Software Bus for
              Distributed Mathematical Reasoning},
  type     = {system description},
  crossref = {CADE:2002},
  pages    = {139--143},
  abstract = {This system description summarizes the development of
              MathWeb in the last three years. We further extended
              the list of reasoning systems integrated in the
              MathWeb-SB, stabilized existing integrations and
              explored new application domains for the MathWeb-SB.
              The main improvements are a more flexible architecture
              and increased standards support in the communication
              protocols used in MathWeb-SB. As a consequence, it is
              much simpler now to use and integrate mathematical
              services into the MathWeb-SB.},
}
@InProceedings{21:CADE:2002,
  author   = {J{\"o}rg Siekmann and others},
  title    = {Proof Development with {OMEGA}},
  type     = {system description},
  crossref = {CADE:2002},
  pages    = {144--149},
  abstract = {{OMEGA} is a mathematical assistant tool that supports
              proof development in mathematical domains at a
              user-friendly level of abstraction. It is a modular
              system with a central data structure and several
              complementary subsystems. {OMEGA} has many
              characteristics in common with systems like NUPRL, COQ,
              HOL, and PVS. However, it differs from these systems
              with respect to its focus on {\em proof planning}. We
              present an overview of the architecture of the {OMEGA}
              system and sketch some of its novel features. Special
              features of {OMEGA} include (1) facilities to access a
              considerable number of different reasoning systems and
              to integrate their results into a single proof
              structure, (2) support for interactive proof
              development through some non-standard inspection
              facilities and guidance in the search for a proof, and
              (3) methods to develop proofs at a knowledge-based
              level.},
}
@InProceedings{2:CADE:2002,
  author   = {Mateja Jamnik and Manfred Kerber and Martin Pollet},
  title    = {Learn{O}matic: System Description},
  type     = {system description},
  crossref = {CADE:2002},
  pages    = {150--155},
  abstract = {We devised a framework within which a proof planning
              system can learn frequently occurring patterns of
              reasoning automatically from a number of typical
              examples, and then use them in proving new theorems.
              The availability of such patterns, captured as proof
              methods in a proof planning system, reduces search and
              proof length. We implemented this learning framework
              for the proof planner OMEGA, and present it in this
              paper -- we call our system {\sc Learn{O}matic}.},
}
@InProceedings{64:CADE:2002,
  author   = {Carlos Eduardo Areces and Juan Heguiabehere},
  title    = {{HyLoRes} 1.0: Direct Resolution for Hybrid Logics},
  type     = {system description},
  crossref = {CADE:2002},
  pages    = {156--160},
  abstract = {HLR is a direct resolution prover for hybrid logics.
              The most interesting distinguishing feature of HLR is
              that it is not based on tableau algorithms but on
              (direct) resolution. HLR implements a version of the
              ``given clause'' algorithm, which has become the
              skeleton underlying most first-order provers. In
              contrast to translation based provers like MSPASS, HLR
              performs resolution directly on the modal (or hybrid)
              input, with no translation into background logics. It
              is often said that hybrid logics combine interesting
              features from both modal and first-order logics. In the
              same spirit, HLR fuses ideas from state-of-the-art
              first-order proving with the simple representation of
              the hybrid object language.},
}
@InProceedings{7:CADE:2002,
  author   = {Eugene Goldberg},
  title    = {Testing satisfiability of {CNF} formulas by computing
              a stable set of points},
  crossref = {CADE:2002},
  pages    = {161--180},
  abstract = {We show that a conjunctive normal form (CNF) formula
              $F$ is unsatisfiable iff there is a set of points of
              the Boolean space that is stable with respect to $F$.
              So testing the satisfiability of a CNF formula reduces
              to looking for a stable set of points (SSP). We give
              some properties of SSPs and describe a simple algorithm
              for constructing an SSP for a CNF formula. Building an
              SSP can be viewed as a ``natural'' way of search space
              traversal. This naturalness of search space examination
              allows one to make use of the regularity of CNF
              formulas to be checked for satisfiability. We
              illustrate this point by showing that if a CNF formula
              $F$ is symmetric with respect to a group of
              permutations, it is very easy to make use of this
              symmetry when constructing an SSP. As an example, we
              show that the unsatisfiability of pigeon-hole CNF
              formulas can be proven by examining only a set of
              points whose size is quadratic in the number of
              holes.},
}
@InProceedings{31:CADE:2002,
  author   = {Boy de la Tour, Thierry},
  title    = {A Note on Symmetry Heuristics in {SEM}},
  crossref = {CADE:2002},
  pages    = {181--194},
  abstract = {We analyse two symmetry heuristics, i.e. heuristics
              that reduce the search space through properties of
              symmetry, in the finite model generator SEM. These are
              SEM's original LNH, and a recent extension XLNH. Our
              aim is to show how a simple group-theoretic framework
              brings much clarity in this matter, especially through
              group actions. Both heuristics can be seen as
              computationally efficient ways of applying a general
              symmetry pruning theorem. Moreover, simple
              combinatorics provide some insight into the relative
              performances of these heuristics. We finally expose a
              fundamental difficulty in making SEM symmetry efficient
              by symmetry pruning.},
}
@InProceedings{67:CADE:2002,
  author   = {Gilles Audemard and Piergiorgio Bertoli and Alessandro
              Cimatti and Artur Kornilowicz and Roberto Sebastiani},
  title    = {A {SAT} Based Approach for Solving Formulas over
              Boolean and Linear Mathematical Propositions},
  crossref = {CADE:2002},
  pages    = {195--210},
  abstract = {The availability of decision procedures for
              combinations of boolean and linear mathematical
              propositions opens the ability to solve problems
              arising from real-world domains such as verification of
              timed systems and planning with resources. In this
              paper we present a general and efficient approach to
              the problem, based on two main ingredients. The first
              is a DPLL-based SAT procedure, for dealing efficiently
              with the propositional component of the problem. The
              second is a tight integration, within the DPLL
              architecture, of a set of mathematical deciders for
              theories of increasing expressive power. A preliminary
              experimental evaluation shows the potential of the
              approach.},
}
@InProceedings{46:CADE:2002,
  author   = {Wolfgang Ahrendt},
  title    = {Deductive Search for Errors in Free Data Type
              Specifications using Model Generation},
  crossref = {CADE:2002},
  pages    = {211--225},
  abstract = {The presented approach aims at identifying false
              conjectures about free data types. Given a
              specification and a conjecture, the method performs a
              search for a model of an according \emph{counter
              specification}. The model search is tailor-made for the
              semantical setting of free data types, where the fixed
              domain allows to describe models just in terms of
              \emph{interpretations}. For sake of interpretation
              construction, a theory specific calculus is provided.
              The concrete rules are `executed' by a procedure known
              as \emph{model generation}. As most free data types
              have infinite domains, the ability of automatically
              solving the non-consequence problem is necessarily
              limited. That problem is addressed by limiting the
              \emph{instantiation} of the axioms. This approximation
              leads to a restricted notion of model correctness,
              which is discussed. At the same time, it enables model
              completeness for free data types, unlike approaches
              based on limiting the domain size.},
}
@InProceedings{25:CADE:2002,
  author   = {Gilles Audemard and Belaid Benhamou},
  title    = {Reasoning by symmetry and function ordering in Finite
              model generation},
  crossref = {CADE:2002},
  pages    = {226--240},
  abstract = {Finite model search for first-order logic theories is
              complementary to theorem proving. Systems like Falcon,
              SEM and FMSET use the known LNH (Least Number
              Heuristic) heuristic to eliminate some trivial
              symmetries. Such symmetries are worthy, but their
              exploitation is limited to the first levels of the
              model search tree, since they disappear as soon as the
              first cells have been interpreted. The symmetry
              property is well-studied in propositional logic and
              CSPs, but only few trivial results on this are known on
              model generation in first-order logic. We study in this
              paper both an ordering strategy that selects the next
              terms to be interpreted and a more general notion of
              symmetry for finite model search in first-order logic.
              We give an efficient detection method for such symmetry
              and show its combination with the trivial one used by
              LNH and LNHO heuristics. This increases the efficiency
              of finite model search generation. The method SEM with
              and without both the function ordering and symmetry
              detection is experimented on several interesting
              mathematical problems to show the advantage of
              reasoning by symmetry and the function ordering.},
}
@InProceedings{32:CADE:2002,
  author   = {Bernhard Gramlich and Reinhard Pichler},
  title    = {Algorithmic Aspects of Herbrand Models Represented by
              Ground Atoms with Ground Equations},
  crossref = {CADE:2002},
  pages    = {241--259},
  abstract = {\emph{Automated model building} has evolved as an
              important subdiscipline of \emph{automated deduction}
              over the past decade. One crucial issue in automated
              model building is the selection of an appropriate
              (finite) representation of (in general infinite)
              models. Quite a few such formalisms have been proposed
              in the literature. In this paper, we concentrate on the
              representation of Herbrand models by ground atoms with
              ground equations (GAE-models). For the actual work with
              any model representation, efficient algorithms for two
              decision problems are required, namely: The {\em clause
              evaluation problem} (i.e.: Given a clause $C$ and a
              representation $\mathcal{M}$ of a model, does $C$
              evaluate to ``true'' in this model?) and the {\em model
              equivalence problem} (i.e.: Given two representations
              $\mathcal{M}_1$ and $\mathcal{M}_2$, do they represent
              the same model?). Previously published algorithms for
              these two problems in case of GAE-models require
              exponential time. We prove that the clause evaluation
              problem is indeed intractable (that is, coNP-complete),
              whereas the model equivalence problem can be solved in
              polynomial time. Moreover, we show how our new
              algorithm for the model equivalence problem can be used
              to transform an arbitrary GAE-model into an equivalent
              one with better computational properties.},
}
@InProceedings{70:CADE:2002,
  author   = {Lilia Georgieva and Ullrich Hustadt and Renate A.
              Schmidt},
  title    = {A new clausal class decidable by hyperresolution},
  crossref = {CADE:2002},
  pages    = {260--274},
  abstract = {In this paper we define a new clausal class, called
              BU, which can be decided by hyperresolution with
              splitting. We also consider the model generation
              problem for BU and show that hyperresolution plus
              splitting can also be used as a Herbrand model
              generation procedure for BU and, furthermore, that the
              addition of a local minimality test allows us to
              generate only minimal Herbrand models for clause sets
              in BU. In addition, we investigate the relationship of
              BU to other solvable classes.},
}
@InProceedings{47:CADE:2002,
  author   = {Christoph Weidenbach and Uwe Brahm and Thomas
              Hillenbrand and Enno Keen and Christian Theobalt and
              Dalibor Topic},
  title    = {{SPASS} Version 2.0},
  type     = {system description},
  crossref = {CADE:2002},
  pages    = {275--279},
  abstract = {SPASS is an automated theorem prover for full
              first-order logic with equality. This system
              description provides an overview of recent developments
              in SPASS~2.0, including among others an implementation
              of contextual rewriting, refinements of the clause
              normal form transformation, and enhancements of the
              inference engine.},
}
@InProceedings{23:CADE:2002,
  author   = {Stephan Schulz and Geoff Sutcliffe},
  title    = {System Description: {GrAnDe} 1.0},
  type     = {system description},
  crossref = {CADE:2002},
  pages    = {280--284},
  abstract = {The validity problem for full first-order logic is
              only semi-decidable. However, there are many
              interesting problems that, when expressed in clause
              normal form, have a finite Herbrand universe. They fall
              into a decidable subclass of first-order logic.
              Traditionally, such problems have been tackled using
              conventional first-order techniques. Some
              implementations, e.g.\ DCTP, are decision procedures
              for this class of problems. An alternative approach,
              justified by Herbrand's theorem, is to generate the
              ground instances of such a problem and use a
              propositional decision system to determine the
              satisfiability of the resulting propositional problem.
              The applicability of the grounding approach has led to
              these problems being called ``effectively
              propositional'' (EPR) problems. The TPTP problem
              library v2.4.1 contains 574 EPR problems. Many of these
              are group theory problems (101 problems) and CNF
              translations of formulae in propositional multi-modal
              logic (206 problems).},
}
@InProceedings{56:CADE:2002,
  author   = {Simon Colton},
  title    = {System Description: The {HR} Program for Theorem
              Generation},
  type     = {system description},
  crossref = {CADE:2002},
  pages    = {285--289},
  abstract = {Automated theory formation involves the production of
              objects of interest, concepts about those objects,
              conjectures relating the concepts and proofs of the
              conjectures. In group theory, for example, the objects
              of interest are the groups themselves, the concepts
              include element types, subgroup types, etc., the
              conjectures include implication and if-and-only-if
              conjectures and these become theorems if they are
              proved, non-theorems if disproved. Similar to Zhang's
              MCS program, the HR system -- named after
              mathematicians Hardy and Ramanujan -- performs theory
              formation in mathematical domains. It works by (i)
              using the MACE model generator to generate objects of
              interest from axiom sets (ii) performing the concept
              formation and conjecture making itself and (iii) using
              the Otter theorem prover to prove conjectures.},
}
@InProceedings{20:CADE:2002,
  author   = {Michael Whalen and Johann Schumann and Bernd Fischer},
  title    = {{AutoBayes/CC} -- Combining Program Synthesis with
              Automatic Code Certification -- System Description},
  type     = {system description},
  crossref = {CADE:2002},
  pages    = {290--294},
  abstract = {Our work combines code certification with automatic
              program synthesis which makes it possible to
              automatically generate both the code and all necessary
              annotations for fully automatic certification. By
              \emph{generating} detailed annotations, one of the
              biggest obstacles for code certification is removed and
              it becomes possible to automatically check that
              synthesized programs obey the desired safety
              properties.},
}
@InProceedings{Malik:FLoC:2002:*CADE+CAV,
  author   = {Sharad Malik},
  title    = {The Quest for Efficient {Boolean} Satisfiability
              Solvers},
  type     = {Invited talk},
  crossref = {CADE:2002},
  pages    = {295--313},
  abstract = {The classical NP-complete problem of Boolean
              Satisfiability (SAT) has seen much interest in not just
              the theoretical computer science community, but also in
              areas where practical solutions to this problem enable
              significant practical applications. Since the first
              development of the basic search based algorithm
              proposed by Davis, Putnam, Logemann and Loveland (DPLL)
              about forty years ago, this area has seen active
              research effort with many interesting contributions
              that have culminated in state-of-the-art SAT solvers
              today being able to handle problem instances with
              thousands, and in some cases even millions, of
              variables. In this paper we examine some of the main
              ideas along this passage that have led to our current
              capabilities. Given the depth of the literature in this
              field, it is impossible to do this in any comprehensive
              way; rather we focus on techniques with consistent
              demonstrated efficiency in available solvers. For the
              most part, we focus on techniques within the basic DPLL
              search framework, but also briefly describe other
              approaches and look at some possible future research
              directions.},
}
@InProceedings{65:CADE:2002,
  author   = {Cristina Borralleras and Salvador Lucas and Albert
              Rubio},
  title    = {Recursive Path Orderings can be Context-Sensitive},
  crossref = {CADE:2002},
  pages    = {314--331},
  abstract = {Context-sensitive rewriting (CSR) is a simple
              restriction of rewriting which can be used e.g.\ for
              modelling non-eager evaluation in programming
              languages. Many times {\em termination\/} is a crucial
              property for program verification. Hence, developing
              tools for automatically proving termination of CSR is
              necessary. All known methods for proving termination of
              (CSR) systems are based on transforming the CSR system
              $\rightarrow$ into a (standard) rewrite system
              $\rightarrow'$ whose termination implies the
              termination of the CSR system $\rightarrow$. In this
              paper first several negative results on the
              applicability of existing transformation methods are
              provided. Second, as a general-purpose way to overcome
              these problems, we develop the first (up to our
              knowledge) method for proving directly termination of
              context-sensitive rewrite systems: the {\em context
              sensitive recursive path ordering} (CSRPO). Many
              interesting (realistic) examples that cannot be proved
              or are hard to prove with the known transformation
              methods are easily handled using CSRPO. Moreover, CSRPO
              is very suitable for automation.},
}
@InProceedings{17:CADE:2002,
  author   = {Harald Ganzinger},
  title    = {Shostak Light},
  crossref = {CADE:2002},
  pages    = {332--346},
  abstract = {We represent the essential ingredients of Shostak's
              procedure at a high level of abstraction, and as a
              refinement of the Nelson-Oppen procedure. We analyze
              completeness issues of the method based on a general
              notion of theories. We also formalize a notion of
              $\sigma$-models and show that on the basis of Shostak's
              procedure we cannot distinguish a theory from its
              approximation represented by the class of its
              $\sigma$-models.},
}
@InProceedings{45:CADE:2002,
  author   = {Jonathan Ford and Natarajan Shankar},
  title    = {Formal Verification of a Combination Decision
              Procedure},
  crossref = {CADE:2002},
  pages    = {347--362},
  abstract = {Decision procedures for combinations of theories are
              at the core of many modern theorem provers such as
              ACL2, {\sc Ehdm}, PVS, SIMPLIFY, the Stanford Pascal
              Verifier, STeP, SVC, and Z/Eves. Shostak, in 1984,
              published a decision procedure for the combination of
              canonizable and solvable theories. Recently, Ruess and
              Shankar showed Shostak's method to be incomplete and
              nonterminating, and presented a correct version of
              Shostak's algorithm along with informal proofs of
              termination, soundness, and completeness. We describe a
              formalization and mechanical verification of these
              proofs using the PVS verification system. The
              formalization itself posed significant challenges and
              the verification revealed some gaps in the informal
              argument.},
}
@InProceedings{4:CADE:2002,
  author   = {Calogero G. Zarba},
  title    = {Combining Multisets with Integers},
  crossref = {CADE:2002},
  pages    = {363--376},
  abstract = {We present a decision procedure for a constraint
              language combining multisets of ur-elements, the
              integers, and an arbitrary first-order theory $T$ of
              the ur-elements. Our decision procedure is an extension
              of the Nelson-Oppen combination method specifically
              tailored to the combination domain of multisets,
              integers, and ur-elements.},
}
@InProceedings{14:CADE:2002,
  author   = {Lawrence C. Paulson},
  title    = {The Reflection Theorem: {A} Study in Formalizing
              Meta-Theoretic Reasoning},
  crossref = {CADE:2002},
  pages    = {377--391},
  abstract = {The reflection theorem has been proved using
              Isabelle/ZF. This theorem cannot be expressed in ZF,
              and its proof requires reasoning at the meta-level.
              There is a particularly elegant proof that reduces the
              meta-level reasoning to a single induction over
              formulas. Each case of the induction has been proved
              with Isabelle/ZF, whose built-in tools can prove
              specific instances of the reflection theorem upon
              demand.},
}
@InProceedings{24:CADE:2002,
  author   = {Aaron Stump and David L. Dill},
  title    = {Faster Proof Checking in the {Edinburgh Logical
              Framework}},
  crossref = {CADE:2002},
  pages    = {392--407},
  abstract = {This paper describes optimizations for checking proofs
              represented in the Edinburgh Logical Framework (LF).
              The optimizations allow large proofs to be checked
              efficiently which cannot feasibly be checked using the
              standard algorithm for LF. The crucial optimization is
              a form of result caching. To formalize this
              optimization, a path calculus for LF is developed and
              shown equivalent to a standard calculus.},
}
@InProceedings{18:CADE:2002,
  author   = {Chad E. Brown},
  title    = {Solving for Set Variables in Higher-Order Theorem
              Proving},
  crossref = {CADE:2002},
  pages    = {408--422},
  abstract = {In higher-order logic, we must consider literals with
              flexible (set variable) heads. Set variables may be
              instantiated with logical formulas of arbitrary
              complexity. An alternative to guessing the logical
              structures of instantiations for set variables is to
              solve for sets satisfying constraints. Using the
              Knaster-Tarski Fixed Point Theorem, constraints whose
              solutions require recursive definitions can be solved
              as fixed points of monotone set functions. In this
              paper, we consider an approach to higher-order theorem
              proving which intertwines conventional theorem proving
              in the form of mating search with generating and
              solving set constraints.},
}
@InProceedings{38:CADE:2002,
author = "Orna Kupferman and Ulrike Sattler and Moshe Y. Vardi",
title = "The Complexity of the Graded $\mu$-Calculus",
crossref = "CADE:2002",
pages = "423--437",
abstract = "In classical logic, existential and universal
quantifiers express that there exists at least one
individual satisfying a formula, or that all
individuals satisfy a formula. In many logics, these
quantifiers have been generalized to express that, for
a non-negative integer $n$, at least $n$ individuals or
all but $n$ individuals satisfy a formula. In modal
logics, \emph{graded modalities} generalize standard
existential and universal modalities in that they
express, e.g., that there exist at least $n$ accessible
worlds satisfying a certain formula. Graded modalities
are useful expressive means in knowledge
representation; they are present in a variety of other
knowledge representation formalisms closely related to
modal logic. A natural question that arises is how the
generalization of the existential and universal
modalities affects the satisfiability problem for the
logic and its computational complexity, especially when
the numbers in the graded modalities are coded in
binary. In this paper we study the {\em graded
$\mu$-calculus}, which extends graded modal logic with
fixed-point operators, or, equivalently, extends
classical $\mu$-calculus with graded modalities. We
prove that the satisfiability problem for graded
$\mu$-calculus is EXPTIME-complete -- not harder than
the satisfiability problem for $\mu$-calculus, even
when the numbers in the graded modalities are coded in
binary.",
}
@InProceedings{58:CADE:2002,
author = "Leonardo de Moura and Harald Rue{\ss} and Maria
Sorea",
title = "Lazy Theorem Proving for Bounded Model Checking over
Infinite Domains",
crossref = "CADE:2002",
pages = "438--455",
abstract = "We investigate the combination of propositional SAT
checkers with domain-specific theorem provers as a
foundation for bounded model checking over infinite
domains. Given a program $M$ over an infinite state
type, a linear temporal logic formula $\varphi$ with
domain-specific constraints over program states, and an
upper bound $k$, our procedure determines if there is a
falsifying path of length $k$ to the hypothesis that
$M$ satisfies the specification $\varphi$\@. This
problem can be reduced to the satisfiability of Boolean
constraint formulas. Our verification engine for these
kinds of formulas is {\em lazy} in that propositional
abstractions of Boolean constraint formulas are
incrementally refined by generating lemmas on demand
from an automated analysis of spurious counterexamples
using theorem proving. We exemplify bounded model
checking for timed automata and for RTL level
descriptions, and investigate the lazy integration of
SAT solving and theorem proving.",
}
@InProceedings{66:CADE:2002,
author = "Miquel Bofill and Albert Rubio",
title = "Well-foundedness is sufficient for completeness of
Ordered Paramodulation",
crossref = "CADE:2002",
pages = "456--470",
abstract = "For many years all known completeness results for
Knuth-Bendix completion and ordered paramodulation
required the term ordering $\succ$ to be well-founded,
monotonic and total(izable) on ground terms. Then, it
was shown that well-foundedness and the subterm
property were enough for ensuring completeness of
ordered paramodulation. Here we show that the subterm
property is not necessary either. By using a new
restricted form of rewriting we obtain a completeness
proof of ordered paramodulation for Horn clauses with
equality where well-foundedness of the ordering
suffices. Apart from the fundamental interest of this
result, some potential applications motivating the
interest of dropping the subterm property are given.",
}
@InProceedings{43:CADE:2002,
author = "Christopher Lynch and Barbara Morawska",
title = "Basic Syntactic Mutation",
crossref = "CADE:2002",
pages = "471--485",
abstract = "We give a set of inference rules for $E$-unification,
similar to the inference rules for Syntactic Mutation.
If the $E$ is finitely saturated by paramodulation,
then we can block certain terms from further
inferences. Therefore, $E$-unification is decidable in
$NP$, as is also the case for Basic Narrowing. However,
if we further restrict $E$, then our algorithm runs in
quadratic time, whereas Basic Narrowing does not become
polynomial, since it is still nondeterministic.",
}
@InProceedings{59:CADE:2002,
author = "Thomas Hillenbrand and Bernd L{\"o}chner",
title = "The Next {Waldmeister} Loop",
crossref = "CADE:2002",
pages = "486--500",
abstract = "In saturation-based theorem provers, the reasoning
process consists in constructing the closure of an
axiom set under inferences. As is well-known, this
process tends to quickly fill the memory available
unless preventive measures are employed. For
implementations based on the Discount loop, the passive
facts are responsible for most of the memory
consumption. We present a refinement of that loop
allowing such a compression that the space needed for
the passive facts is linearly bound by the number of
active facts. In practice, this will reduce memory
consumption in the Waldmeister system by more than one
order of magnitude as compared to previous compression
schemes.",
}
@InProceedings{39:CADE:2002,
author = "Jean-Marc Andreoli",
title = "Focussing Proof-Net Construction as a Middleware
Paradigm",
crossref = "CADE:2002",
pages = "501--516",
abstract = "This paper introduces a new formulation of the
computational paradigm based on proof-construction in
terms of proof-nets. It shows the relevance of this
paradigm, thus formulated, to capture some of the
fundamental mechanisms of distributed computation (and
in particular, transaction mechanisms), which are
familiar concepts of middleware infrastructures. It
therefore constitutes a first step in the direction of
the definition of a steady conceptual framework in
which to formalise and study various middleware
notions, which, until now, have essentially been
studied through ad-hoc and diverse formalisms. Due to
space constraints, the proofs of the technical results
of this paper have been skipped. They were reviewed
with the initially submitted version of the paper and
are available from the author.",
}
@InProceedings{Baaz:FLoC:2002:*CADE+TABLEAUX,
author = "Matthias Baaz",
title = "Proof Analysis by Resolution",
type = "Invited talk",
crossref = "CADE:2002",
pages = "517--532",
abstract = "Proof analysis of existing proofs is one of the main
sources of scientific progress in mathematics: new
concepts can be obtained e.g. by denoting explicit
definitions in proof parts and axiomatizing them as new
mathematical objects in their own right. (The
development of the concept of integral is a well known
example.) All forms of proof analysis are intended to
make information implicit in a proof explicit, i.e.
visible. Logical proof analysis is mainly concerned
with the implicit constructive content of more or less
formalized proofs. In this paper, we concentrate on
automatizable logical proof analysis in first-order
logic by means of incorporating resolution.",
}
@Proceedings{CADE:2002,
editor = "Andrei Voronkov",
title = "Automated Deduction -- {CADE-18}",
booktitle = "Automated Deduction -- {CADE-18}",
conference = "18th International Conference on Automated Deduction",
key = "CADE",
year = "2002",
month = jul # " 27-30",
venue = "Copenhagen, Denmark",
series = "Lecture Notes in Computer Science",
volume = "2392",
publisher = "Springer-Verlag",
ISBN = "3-540-43931-5",
ISSN = "0302-9743",
}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CAV - Computer Aided Verification
% ===========================
@InProceedings{Holzmann:CAV:2002,
author = "Gerard J. Holzmann",
title = "Software Analysis and Model Checking",
type = "Invited talk",
crossref = "CAV:2002",
pages = "1--16",
abstract = "Most software developers today rely on only a small
number of techniques to check their code for defects:
peer review, code walkthroughs, and testing. Despite a
rich literature on these subjects, the results often
leave much to be desired. The current software testing
process consumes a significant fraction of the overall
resources in industrial software development, yet it
cannot promise zero-defect code. There is reason to
hope that the process can be improved. A range of tools
and techniques has become available in the last few
years that can assess the quality of code with
considerably more rigor than before, and often also
with more ease. Many of the new tools can be understood
as applications of automata theory, and can readily be
combined with logic model checking techniques.",
}
@InProceedings{Malik:FLoC:2002:CADE+*CAV,
author = "Sharad Malik",
title = "The Quest for Efficient {B}oolean Satisfiability
Solvers",
type = "Invited talk",
crossref = "CAV:2002",
pages = "17--36",
abstract = "The classical NP-complete problem of Boolean
Satisfiability (SAT) has seen much interest in not just
the theoretical computer science community, but also in
areas where practical solutions to this problem enable
significant practical applications. Since the first
development of the basic search based algorithm
proposed by Davis, Putnam, Logemann and Loveland (DPLL)
about forty years ago, this area has seen active
research effort with many interesting contributions
that have culminated in state-of-the-art SAT solvers
today being able to handle problem instances with
thousands, and in some cases even millions, of
variables. In this paper we examine some of the main
ideas along this passage that have led to our current
capabilities. Given the depth of the literature in this
field, it is impossible to do this in any comprehensive
way; rather we focus on techniques with consistent
demonstrated efficiency in available solvers. For the
most part, we focus on techniques within the basic DPLL
search framework, but also briefly describe other
approaches and look at some possible future research
directions.",
}
@InProceedings{Cousot:CAV:2002,
author = "Patrick Cousot",
title = "On Abstraction in Software Verification",
type = "Invited tutorial",
crossref = "CAV:2002",
pages = "37--56",
abstract = "We show that the precision of static abstract
software checking algorithms can be enhanced by taking
explicitly into account the abstractions that are
involved in the design of the program model/abstract
semantics. This is illustrated on reachability analysis
and abstract testing.",
}
@InProceedings{Henzinger:CAV:2002,
author = "Thomas A. Henzinger",
title = "The Symbolic Approach to Hybrid Systems",
type = "Invited tutorial",
crossref = "CAV:2002",
pages = "57",
abstract = "A hybrid system is a dynamical system whose state has
both a discrete component, which is updated in a
sequence of steps, and a continuous component, which
evolves over time. Hybrid systems are a useful modeling
tool in a variety of situations, including the embedded
(digital) control of physical (analog) plants,
robotics, circuits, biology, and finance. \\
We survey a computational approach to the verification
and control of hybrid systems which is based on the
symbolic discretization of continuous state changes. On
the theoretical side, we classify infinite, hybrid
state spaces as to which finite, discrete abstractions
they admit. This classification enables us to apply
concepts and results from concurrency theory, model
checking, and game theory to hybrid systems. On the
practical side, we discuss several options for
implementing the symbolic approach to hybrid systems,
and point to existing tool support.",
}
@InProceedings{Thomas:CAV:2002,
author = "Wolfgang Thomas",
title = "Infinite Games and Verification",
type = "Invited tutorial",
crossref = "CAV:2002",
pages = "58--64",
abstract = "The purpose of this tutorial is to survey the
essentials of the algorithmic theory of infinite games,
its role in automatic program synthesis and
verification, and some challenges of current
research.",
}
@InProceedings{Paper5:CAV:2002,
author = "Sharon Barner and Daniel Geist and Anna Gringauze",
title = "Symbolic Localization Reduction with Reconstruction
Layering and Backtracking",
crossref = "CAV:2002",
pages = "65--77",
abstract = "{ Localization reduction is an abstraction-refinement
scheme for model checking which was introduced by Bob
Kurshan as a means for tackling state explosion. It is
completely automatic, but despite the work that has
been done related to this scheme, it still suffers from
computational complexity. In this paper we present
algorithmic improvements to localization reduction that
enabled us to overcome some of these problems. Namely,
we present a new symbolic algorithm for path
reconstruction including incremental refinement and
backtracking. We have implemented these improvements
and compared them to previous work on a large number of
our industrial examples. In some cases the improvement
was dramatic. Using these improvements we were able to
verify circuits that we were not previously able to
address.}",
}
@InProceedings{Paper6:CAV:2002,
author = "Randal E. Bryant and Shuvendu K. Lahiri and Sanjit A.
Seshia",
title = "Modeling and Verifying Systems using a Logic of
Counter Arithmetic with Lambda Expressions and
Uninterpreted Functions",
crossref = "CAV:2002",
pages = "78--92",
abstract = "{ Verifiers for infinite-state systems must trade off
between the expressiveness of the modeling formalism
and the efficiency and automation of the tool.
Efficient methods have been devised for specific
classes of systems, such as superscalar processors and
systems with arbitrary size queues. However, to model
systems that are combinations of these classes, no one
method works well enough. \\
In this paper, we present CLU, a logic of Counter
arithmetic with Lambda expressions and Uninterpreted
Functions. CLU generalizes the logic of equality with
uninterpreted functions (EUF) that has proved useful
for verifying pipelined processors. CLU includes, in
addition to the constructs of EUF, constrained lambda
expressions, ordering, and counter arithmetic. This
allows us to model infinite-state systems, such as a
variety of infinite memory structures, finite and
infinite queues including lossy channels, and networks
of identical processes. Even with this richer
expressive power, the validity of a CLU formula can be
efficiently decided by translating it to a
propositional formula, and then using Boolean methods
to check validity. \\
We have built UCLID, a system that can be used for
checking safety properties of systems modeled in CLU.
We demonstrate that UCLID is efficient and expressive,
by modeling and verifying safety properties of a
variety of systems, including an out-of-order execution
unit and the load-store unit of an industrial
microprocessor.}",
}
@InProceedings{Paper9:CAV:2002,
author = "Sharon Barner and Orna Grumberg",
title = "Combining symmetry reduction and under-approximation
for symbolic model checking",
crossref = "CAV:2002",
pages = "93--106",
abstract = "{ This work presents a collection of methods,
integrating {\em symmetry reduction}, {\em
under-approximation}, and {\em symbolic model checking}
in order to reduce space and time for model checking.
The main goal of this work is {\em falsification}.
However, under certain conditions our methods provide
{\em verification} as well. \\
We first present algorithms that perform on-the-fly
model checking for temporal safety properties, using
symmetry reduction. We then extend these algorithms for
checking liveness properties as well. \\
Our methods are fully automatic. The user should supply
some basic information about the symmetry in the
verified system. However, the methods are {\em robust}
and work correctly even if the information supplied by
the user is incorrect. Moreover, the methods return
correct results even in case the computation of the
symmetry reduction has not been completed due to memory
or time explosion. \\
We implemented our methods within IBM's model checker
RuleBase, and compared the performance of our methods
with that of RuleBase. In most cases, our algorithms
outperformed RuleBase with respect to both time and
space.}",
}
@InProceedings{Paper27:CAV:2002,
author = "Amir Pnueli and Jessie Xu and Lenore Zuck",
title = "Liveness with (0,1,infinity)-Counter Abstraction",
crossref = "CAV:2002",
pages = "107--122",
abstract = "{ We introduce the {(0,1,infinity)}-counter
abstraction method by which a parameterized system of
unbounded size is abstracted into a finite-state
system. Assuming that each process in the parameterized
system is finite-state, the abstract variables are
limited counters which count, for each local state s of
a process, the number of processes which currently are
in local state s. The counters are saturated at 2,
which means that counter(s)=2 whenever 2 or more
processes are at state s. The emphasis of the paper is
on the derivation of an adequate and sound set of
fairness requirements (both weak and strong) which
enable proofs of liveness properties of the abstract
system, from which we can safely conclude a
corresponding liveness property of the original
parameterized system. We illustrate the method on a few
parameterized systems, including Szymanski's Algorithm
for mutual exclusion. \\
The method is then extended to deal with parameterized
systems whose processes may have infinitely many local
states, such as the Bakery Algorithm. The extension is
based on a choice of a few ``interesting'' and
``relevant'' state assertions and then
{(0,1,infinity)}-counting the number of processes
satisfying each of these assertions.}",
}
@InProceedings{Paper32:CAV:2002,
author = "Prosenjit Chatterjee and Hemanthkumar Sivaraj and
Ganesh Gopalakrishnan",
title = "Shared memory consistency protocol verification
against weak memory models: refinement via
model-checking",
crossref = "CAV:2002",
pages = "123--136",
abstract = "{ Weak shared memory consistency models, especially
those used by modern microprocessor families, are quite
complex. The bus and/or directory-based protocols that
help realize shared memory multiprocessors using these
microprocessors are also exceedingly complex. Thus, the
{\em correctness problem} -- that all the executions
generated by the multiprocessor for any given
concurrent program are also allowed by the memory model
-- is a major challenge. In this paper, we present a
formal approach to verify protocol implementation
models against weak shared memory models through
automatable {\em refinement checking} supported by a
{\em model checker}. We define a taxonomy of weak
shared memory models that includes most published
commercial memory models, and detail how our approach
applies over all these models. In our approach, the
designer follows a prescribed procedure to build a
highly simplified intermediate abstraction for the
given implementation. The intermediate abstraction and
the implementation are concurrently run using a
model-checker, checking for refinement. The
intermediate abstraction can be proved correct against
the memory model specification using theorem proving.
We have verified four different Alpha memory model
implementations and two different Itanium memory model
implementations against their respective
specifications. The results are encouraging in terms of
the uniformity of the procedure, the high degree of
automation, acceptable run-times, and empirically
observed bug-hunting efficacy. The use of parallel
model-checking, based on a version of the parallel
Mur$\phi$ model checker we have recently developed for
the MPI library, has been essential to finish the
search in a matter of a few hours.}",
}
@InProceedings{Paper34:CAV:2002,
author = "Patrice Godefroid and Radha Jagadeesan",
title = "Automatic Abstraction Using Generalized Model
Checking",
crossref = "CAV:2002",
pages = "137--150",
abstract = "{ Generalized model checking is a framework for
reasoning about partial state spaces of concurrent
reactive systems. The state space of a system is only
``partial'' (partially known) when a full state-space
exploration is not computationally tractable, or when
abstraction techniques are used to simplify the
system's representation. In the context of automatic
abstraction, generalized model checking means checking
whether there exists a concretization of an abstraction
that satisfies a temporal logic formula. In this paper,
we show how generalized model checking can extend
existing automatic abstraction techniques (such as
predicate abstraction) for model checking
concurrent/reactive programs and yield the three
following improvements: (1) any temporal logic formula
can be checked (not just universal properties as with
traditional conservative abstractions), (2) correctness
proofs and counter-examples are both guaranteed to be
sound, and (3) verification results can be more
precise. We study the cost needed to improve precision
by presenting new upper and lower bounds for the
complexity of generalized model checking in the size of
the abstraction.}",
}
@InProceedings{Paper29:CAV:2002,
author = "Jason Baumgartner and Andreas Kuehlmann and Jacob
Abraham",
title = "Property Checking via Structural Analysis",
crossref = "CAV:2002",
pages = "151--165",
abstract = "{ This paper describes a framework for ``structural
target enlargement''. Our approach is based upon a
structural decomposition of the verification problem
into several subtasks, each solved by a specialized
algorithm for overall efficiency. Our contributions
include the following: (1) a robust backward unfolding
technique for structural target enlargement: from the
target states, we perform ``compose-based'' pre-image
computations for a number of steps determined by
structural analysis, truncating the search if resource
limitations are exceeded; (2) similar to frontier
simplification in symbolic reachability analysis, we
describe the application of don't cares for enhancing
the presented target enlargement; and (3) a structural
algorithm for computing a bound of a state-transition
diagram's diameter which, for several classes of
industrial designs, is sufficiently small to guarantee
completeness of a bounded model check. In most cases,
the verification problem is efficiently discharged by
the enlargement process; otherwise, it is passed in
simplified form to another solution approach. The
presented techniques are implemented in a flexible
verification framework that allows arbitrary
combination with other techniques. Extensive
experimental results demonstrate the effectiveness of
the described methods.}",
}
@InProceedings{Paper31:CAV:2002,
author = "Sriram K. Rajamani and Jakob Rehof",
title = "Conformance Checking for Models of Asynchronous
Message Passing Software",
crossref = "CAV:2002",
pages = "166--179",
abstract = "{ We propose a notion of conformance between a
specification $S$ and an implementation model $I$
extracted from a message-passing program. In our
framework, $S$ and $I$ are CCS processes, which soundly
abstract the externally visible communication behavior
of a message-passing program. We use the extracted
models to check that programs do not get stuck, waiting
to receive or trying to send messages in vain. We show
that our definition of stuckness and conformance
capture important correctness conditions of
message-passing software. Our definition of conformance
was motivated by the need for modular reasoning over
models, leading to the requirement that conformance
preserve substitutability with respect to
stuck-freeness: If $I$ conforms to $S$, and $P$ is any
environment such that $P \mid S$ is stuck-free, then it
follows that $P \mid I$ is stuck-free. We present an
algorithm for checking if $I$ conforms to $S$, discuss
implementation of the algorithm, and present experience
on some examples.}",
}
@InProceedings{Paper46:CAV:2002,
author = "Cormac Flanagan and Shaz Qadeer and Sanjit Seshia",
title = "A modular checker for multithreaded programs",
crossref = "CAV:2002",
pages = "180--194",
abstract = "{ Ensuring the reliability of multithreaded software
systems through testing is difficult, because of
nondeterministic and subtle interactions between
threads. Static checking, which has the potential to
analyze the program's behavior over all execution paths
and for all thread interleavings, is a promising
complementary technique. We have built a scalable and
expressive static checker called Calvin for
multithreaded programs. To handle realistic programs,
Calvin performs modular checking of each procedure
called by a thread using specifications of other
procedures and other threads. The checker leverages off
existing techniques based on verification conditions
and automatic theorem proving. \\
To evaluate the checker, we have applied it to several
real-world programs. Our experience indicates that
Calvin has a moderate annotation overhead and can catch
defects in multithreaded programs, including
synchronization errors and violation of data
invariants.}",
}
@InProceedings{Paper11:CAV:2002,
author = "Tomohiro Yoneda and Tomoya Kitai and Chris Myers",
title = "Automatic Derivation of Timing Constraints by Failure
Analysis",
crossref = "CAV:2002",
pages = "195--208",
abstract = "{ This work proposes a technique to automatically
obtain timing constraints for a given timed circuit to
operate correctly. A designated set of delay parameters
of a circuit are first set to sufficiently large
bounds, and verification runs followed by failure
analysis are repeated. Each verification run performs
timed state space enumeration under the given delay
bounds, and produces a failure trace if it exists. The
failure trace is analyzed, and sufficient timing
constraints to prevent the failure are obtained. Then,
the delay bounds are tightened according to the timing
constraints by using an ILP (Integer Linear Programming)
solver. This process terminates when either some delay
bounds under which no failure is detected are found or
no new delay bounds to prevent the failures can be
obtained. The experimental results using a naive
implementation show that the proposed method can
efficiently handle asynchronous benchmark circuits and
nontrivial GasP circuits.}",
}
@InProceedings{Paper13:CAV:2002,
author = "Ofer Strichman and Sanjit A. Seshia and Randal E.
Bryant",
title = "Reducing linear inequalities to propositional
formulas",
crossref = "CAV:2002",
pages = "209--222",
abstract = "{ We show a reduction to propositional logic from a
Boolean combination of inequalities of the form $v_i
\ge v_j + c$ and $v_i > v_j + c$, where $c$ is a
constant and $v_i,v_j$ are variables of type real or
integer. Equalities and uninterpreted functions can be
expressed in this logic as well. We discuss the
advantages of using this reduction as compared to
competing methods, and present experimental results
that support our claims.}",
}
@InProceedings{Paper26:CAV:2002,
author = "Hakan L. S. Younes and Reid G. Simmons",
title = "Probabilistic Verification of Discrete Event Systems
using Acceptance Sampling",
crossref = "CAV:2002",
pages = "223--235",
abstract = "{ We present a procedure for verifying properties of
discrete event systems modeled as generalized
semi-Markov processes. The dynamics of such systems can
be very complex, which makes them hard to analyze. We
resort to methods based on Monte Carlo simulation and
statistical hypothesis testing. The verification is
probabilistic in two senses. First, the properties,
expressed as CSL formulas interpreted over generalized
semi-Markov processes, can be probabilistic. Second,
the result of the verification is probabilistic, and
the probability of error is bounded by two parameters
passed to the verification procedure. The verification
of properties can be carried out in an anytime manner
by starting off with loose error bounds, and gradually
tightening these bounds.}",
}
@InProceedings{Paper15:CAV:2002,
author = "Clark W. Barrett and David L. Dill and Aaron Stump",
title = "Checking Satisfiability of First-Order Formulas by
Incremental Translation to {SAT}",
crossref = "CAV:2002",
pages = "236--249",
abstract = "{ In the past few years, general-purpose propositional
satisfiability (SAT) solvers have improved dramatically
in performance and have been used to tackle many new
problems. It has also been shown that certain simple
fragments of first-order logic can be decided
efficiently by first translating the problem into an
equivalent SAT problem and then using a fast SAT
solver. In this paper, we describe an alternative but
similar approach to using SAT in conjunction with a
more expressive fragment of first-order logic. However,
rather than translating the entire formula up front,
the formula is incrementally translated during a search
for the solution. As a result, only that portion of the
translation that is actually relevant to the solution
is obtained. We describe a number of obstacles that had
to be overcome before developing an approach which was
ultimately very effective, and give results on
verification benchmarks using CVC (Cooperating Validity
Checker) and the Chaff SAT solver. The results show a
performance gain of several orders of magnitude over
CVC alone and indicate that the method is more robust
than the heuristics found in CVC's predecessor, SVC.}",
}
@InProceedings{Paper33:CAV:2002,
author = "Ken L. McMillan",
title = "Applying {SAT} methods in Unbounded Symbolic Model
Checking",
crossref = "CAV:2002",
pages = "250--264",
abstract = "{ A method of symbolic model checking is introduced
that uses conjunctive normal form (CNF) rather than
binary decision diagrams (BDD's) and uses a SAT-based
approach to quantifier elimination. This method is
compared to a traditional BDD-based model checking
approach using a set of benchmark problems derived from
the compositional verification of a commercial
microprocessor design.}",
}
@InProceedings{Paper41:CAV:2002,
author = "Edmund M. Clarke and Anubhav Gupta and James Kukula
and Ofer Strichman",
title = "{SAT} based Abstraction-Refinement using {ILP} and
Machine Learning Techniques",
crossref = "CAV:2002",
pages = "265--279",
abstract = "{ We describe new techniques for model checking in the
counterexample guided abstraction/refinement framework.
The abstraction phase `hides' the logic of various
variables, hence considering them as inputs. This type
of abstraction may lead to `spurious' counterexamples,
i.e. traces that can not be simulated on the original
(concrete) machine. We check whether a counterexample
is real or spurious with a SAT checker. We then use a
combination of Integer Linear Programming (ILP) and
machine learning techniques for refining the
abstraction based on the counterexample. The process is
repeated until either a real counterexample is found or
the property is verified. We have implemented these
techniques on top of the model checker NuSMV and the
SAT solver Chaff. Experimental results prove the
viability of these new techniques.}",
}
@InProceedings{Paper36:CAV:2002,
author = "Jesse D. Bingham and Alan J. Hu",
title = "Semi-Formal Bounded Model Checking",
crossref = "CAV:2002",
pages = "280--294",
abstract = "{ This paper presents a novel approach to bounded
model checking. We replace the SAT solver by an
extended simulator of the circuit being verified.
Compared to SAT-solving algorithms, our approach
sacrifices some generality in selecting splitting
variables and in the kinds of learning possible. In
exchange, our approach enables compiled simulation of
the circuit being verified, while our simulator
extension allows us to retain limited learning and
conflict-directed backtracking. The result combines
some of the raw speed of compiled simulation with some
of the search-space pruning of SAT solvers. On example
circuits, our preliminary implementation is competitive
with state-of-the-art SAT solvers, and we provide
intuition for when one method would be superior to the
other. More importantly, our verification approach
continuously knows its coverage of the search space,
providing useful semi-formal verification results when
full verification is infeasible. In some cases, very
high coverage can be attained in a tiny fraction of the
time required for full coverage by either our approach
or SAT solving.}",
}
@InProceedings{Paper14:CAV:2002,
author = "Marco Bozzano and Giorgio Delzanno",
title = "Algorithmic Verification of Invalidation-based
Protocols",
crossref = "CAV:2002",
pages = "295--308",
abstract = "Invalidation-based protocols are widely used for
ensuring the consistency of data distributed on several
nodes as in multi-processor systems, and distributed
file and database systems. The Broadcast Protocols of
Emerson and Namjoshi [EN98] represent a possible formal
model one can use here to specify systems with a finite
but unbounded number of finite-state components. The
symbolic backward reachability algorithm of Esparza,
Finkel and Mayr can be used for checking, fully
automatically, safety properties for this class of
infinite-state systems. This paradigm has however the
following limitation: it requires a preliminary (often
manual) step to abstract the behavior of individual
processes into a finite-state system.",
}
@InProceedings{Paper17:CAV:2002,
author = "Christian Jacobi",
title = "Formal Verification of Complex Out-of-order Pipelines
by Combining Model-Checking and Theorem-Proving",
crossref = "CAV:2002",
pages = "309--323",
abstract = "{ We describe a methodology for the formal
verification of complex out-of-order pipelines as they
may be used as execution units in out-of-order
processors. The pipelines may process multiple
instructions simultaneously, may have branches and
cycles in the pipeline structure, may have variable
latency, and may reorder instructions internally. The
methodology combines model-checking for the
verification of the pipeline control, and theorem
proving for the verification of the pipeline
functionality. In order to combine both techniques, we
formally verify that the FairCTL operators defined in
$\mu$-calculus match their intended semantics expressed
in a form where computation traces are explicit, since
this form is better suited for theorem proving. This
allows the formally safe translation of model-checked
properties of the pipeline control into a
theorem-proving friendly form, which is used for the
verification of the overall correctness of the
pipeline, including functional correctness. As an
example we prove the correctness of the pipeline of a
multiplication/division floating point unit with all
the features mentioned above.}",
}
@InProceedings{Paper35:CAV:2002,
author = "Yannick Chevalier and Laurent Vigneron",
title = "Automated Unbounded Verification of Security
Protocols",
crossref = "CAV:2002",
pages = "324--337",
abstract = "{ We present a new model for automated verification of
security protocols, permitting the use of an unbounded
number of protocol runs. We prove its correctness,
completeness and also that it terminates. It has been
implemented and its efficiency is clearly shown by the
number of protocols successfully studied. In
particular, we present an attack previously unreported
on the Denning-Sacco symmetric key protocol.}",
}
@InProceedings{Paper10:CAV:2002,
author = "Rajeev Alur and Michael McDougall and Zijiang Yang",
title = "Exploiting Behavioral Hierarchy for Efficient Model
Checking",
crossref = "CAV:2002",
pages = "338--342",
abstract = "{ Software modeling languages use hierarchical state
machines for structured specification of control flow.
This paper presents techniques for exploiting the
hierarchical structure for reducing the computational
requirements of algorithms for state-space exploration.
We first report on a tool called Hermes for creating
and manipulating hierarchical models. Then, we propose
heuristics for both enumerative and BDD-based symbolic
algorithms for model checking. We demonstrate the
benefits of our heuristics using case-studies in
analysis of network protocols and of benchmark
sequential circuits.}",
}
@InProceedings{Paper19:CAV:2002,
author = "Marius Bozga and Susanne Graf and Laurent Mounier",
title = "{IF-2.0}: {A} Validation Environment for
Component-Based Real-Time Systems",
crossref = "CAV:2002",
pages = "343--348",
abstract = "{ The development of the IF toolbox was initiated
several years ago, in order to provide an open
validation platform for timed asynchronous systems
(such as telecommunication protocols or distributed
applications). Despite the interest of this toolbox on
specific applications, it appears that some of the
initial design choices, which were made to obtain a
maximal efficiency, are sometimes too restrictive. This
situation motivated the extension of the IF
intermediate representation and, consequently, to
re-consider the architecture of its exploration
engine.}",
}
@InProceedings{Paper20:CAV:2002,
author = "Alessandro Armando and others",
title = "The {AVISS} Security Protocols Analysis Tool",
crossref = "CAV:2002",
pages = "349--353",
abstract = "{ We introduce AVISS, a tool for security protocol
analysis that supports the integration of back-ends
implementing different search techniques, allowing
their systematic and quantitative comparison and paving
the way to their effective interaction. As a
significant example, we have implemented three
back-ends, and then used the AVISS tool to analyze 30
of the 50 problems in the Clark-Jacob's protocol
library.}",
}
@InProceedings{Paper21:CAV:2002,
author = "Eugene Asarin and Gordon Pace and Gerardo Schneider
and Sergio Yovine",
title = "{SPeeDI} -- a Verification Tool for Polygonal Hybrid
Systems",
crossref = "CAV:2002",
pages = "354--358",
abstract = "{ We present a prototype tool SPeeDI for solving the
reachability problem for a class of planar hybrid
systems: polygonal differential inclusions. The tool
implements our reachability algorithm combining several
techniques, namely, (1) the representation of the
two-dimensional continuous dynamics as a
one-dimensional discrete dynamical system, (2) the
characterization of the set of qualitative behaviors of
the latter as a finite set of types of signatures, and
(3) the ``acceleration'' of the iterations in the case of
cyclic signatures. The tool, programmed in Haskell,
decides reachability, calculates traces, and generates
graphical representations of systems and traces.}",
}
@InProceedings{Paper22:CAV:2002,
author = "Alessandro Cimatti and others",
title = "{NuSMV2}: an OpenSource tool for symbolic model
checking",
crossref = "CAV:2002",
pages = "359--364",
abstract = "{ NuSMV is a symbolic model checker originated from
the reengineering, reimplementation and extension of
SMV, the first BDD-based model checker developed at
CMU. The NuSMV project aims at the development of a
state-of-the-art symbolic model checker, designed to be
applicable in technology transfer projects, well
structured, open, flexible and documented. \\
This paper describes NuSMV version 2. NuSMV2 inherits
all the functionalities of the previous version, and
extends them in several directions. The main novelty in
NuSMV2 is the integration of model checking techniques
based on propositional satisfiability (SAT), that are
currently enjoying a substantial success in several
industrial fields. To the best of our knowledge, NuSMV2
is currently the only publicly available system that
provides an effective integration of BDD-based and
SAT-based model checking. \\
With NuSMV2, we are also adopting a new development and
license model. NuSMV2 is distributed under the LGPL
OpenSource license, that allows anyone interested to
freely use the tool and to participate in its
development. The aim of the NuSMV OpenSource project is
to provide to the model checking community a common
platform for the research, the implementation, and the
comparison of new symbolic model checking
techniques.}",
}
@InProceedings{Paper30:CAV:2002,
author = "Eugene Asarin and Thao Dang and Oded Maler",
title = "The d/dt tool for Verification of Hybrid Systems",
crossref = "CAV:2002",
pages = "365--370",
abstract = "{ In this paper we describe the tool d/dt which
provides automatic verification of safety properties of
hybrid systems with linear continuous dynamics with
uncertain input. The verification procedure is based on
a method for over-approximating reachable sets by
orthogonal polyhedra. In addition, using reachability
analysis the tool allows to automatically synthesize a
controller which switches the system between continuous
modes in order to satisfy a safety specification.}",
}
@InProceedings{Paper23:CAV:2002,
author = "Orna Kupferman and Nir Piterman and Moshe Y. Vardi",
title = "Model Checking Linear Properties of
Prefix-Recognizable Systems",
crossref = "CAV:2002",
pages = "371--385",
abstract = "{ We develop an automata-theoretic framework for
reasoning about linear properties of infinite-state
sequential systems. Our framework is based on the
observation that states of such systems, which carry a
finite but unbounded amount of information, can be
viewed as nodes in an infinite tree, and transitions
between states can be simulated by finite-state
automata. Checking that the system satisfies a temporal
property can then be done by an alternating two-way
automaton that navigates through the tree. For
branching properties, the framework is known and the
two-way alternating automaton is a tree automaton.
Applying the framework for linear properties results in
algorithms that are not optimal. Indeed, the fact that
a tree automaton can split to copies and simultaneously
read all the paths of the tree has a computational
price and is irrelevant for linear properties. We
introduce path automata on trees. The input to a path
automaton is a tree, but the automaton cannot split to
copies and it can read only a single path of the tree.
In particular, two-way nondeterministic path automata
enable exactly the type of navigation that is required
in order to check linear properties of infinite-state
sequential systems. \\
As has been the case with finite-state systems, the
automata-theoretic framework is quite versatile. We
demonstrate it by solving several versions of the
model-checking problem for LTL specifications and
prefix-recognizable systems. Our algorithm is
exponential in both the size of (the description of)
the system and the size of the LTL specification, and
we prove a matching lower bound. This is the first
optimal algorithm for solving the LTL model-checking
problem for prefix recognizable systems. Our framework
also handles systems with regular labeling, and in fact
we show that LTL model checking with respect to
pushdown systems with regular labeling is interreducible
with LTL model checking with respect to
prefix-recognizable systems with simple labeling.}",
}
@InProceedings{Paper40:CAV:2002,
author = "Tatiana Rybina and Andrei Voronkov",
title = "Using canonical representations of solutions to speed
up infinite-state model checking",
crossref = "CAV:2002",
pages = "386--400",
abstract = "{ In this paper we discuss reachability analysis for
infinite-state systems in which states can be
represented by a vector of integers. We propose a new
algorithm for verifying reachability properties based
on canonical representations of solutions instead of
decision procedures for integer or real arithmetic.
Experimental results demonstrate that problems in
protocol verification which are beyond the reach of
other existing systems can be solved completely
automatically.}",
}
@InProceedings{Paper2:CAV:2002,
author = "Walter Hartong and Lars Hedrich and Erich Barke",
title = "On Discrete Modeling and Model Checking for Nonlinear
Analog Systems",
crossref = "CAV:2002",
pages = "401--413",
abstract = "{ In this contribution we present a new method for
developing discrete models for nonlinear analog
systems. Using an adaptive state space intersection
method the main nonlinear properties of the analog
system can be retained. Consequently, digital model
checking ideas can be applied to analog systems. To
describe analog specification properties an extension
to the standard model checking language CTL and the
appropriate, algorithmic modifications are needed. Two
nonlinear examples are given to show the feasibility
and the advantages of this method.}",
}
@InProceedings{Paper38:CAV:2002,
author = "Arindam Chakrabarti and Luca de Alfaro and Thomas A.
Henzinger and Freddy Y. C. Mang",
title = "Synchronous and Bidirectional Component Interfaces",
crossref = "CAV:2002",
pages = "414--427",
abstract = "{ Component-based designs are typically conceived
using an ``optimistic'' approach: a component is designed
under some assumptions about its environment, with the
expectation that the assumptions will be satisfied in
the complete design. In turn, the design may describe
the behavior of the component only when the component
is in an environment that satisfies the assumptions. We
present interface models to capture this approach to
design. In these models, an interface describes both
the input assumptions of a component, and its output
behavior. By enabling us to check that the input
assumptions of a component are met in a design,
interface models provide a basic kind of compatibility
check for component-based design. When refining a
design into an implementation, interface models require
that the implementation behavior of a component
satisfies the design specification only when the input
assumptions of the component are satisfied, yielding
greater flexibility in the choice of implementations.
\\
We present two interface models in detail, one for a
simple synchronous form of interaction between
components typical in hardware, and the other for more
complex synchronous interactions on bidirectional
connections. As example for the latter, we specify the
interface of a bidirectional bus, with the input
assumption that at any time at most one component has
write access to the bus. For these interface models, we
present algorithms for compatibility and refinement
checking, as well as efficient symbolic
implementations. We also show how these interface
models lead to a rich methodology for the
component-based design and analysis of synchronous
systems.}",
}
@InProceedings{Paper44:CAV:2002,
author = "Arindam Chakrabarti and Luca de Alfaro and Thomas A.
Henzinger and Marcin Jurdzi{\'n}ski and Freddy Y. C.
Mang",
title = "Interface Compatibility Checking for Software
Objects",
crossref = "CAV:2002",
pages = "428--441",
abstract = "{ We present a formal methodology and tool for
uncovering errors in the interaction of software
objects. Our methodology consists of a suite of
languages for defining object interfaces, and
algorithms for checking interface compatibility. An
object interface makes assumptions about the
environment in the form of call and availability
constraints. A call constraint restricts the
accessibility of local methods to certain external
methods. An availability constraint restricts the
accessibility of local methods to certain states of the
object. For example, an interface for a file server
with local methods open and read may assert that a file
cannot be read without having been opened. Checking
interface compatibility requires the solution of games,
and in the case of object interfaces with availability
constraints, of pushdown games. Using object
interfaces, we have uncovered incompatibilities in
TinyOS, a small operating system for sensor nodes in
ad hoc networks.}",
}
@InProceedings{Paper45:CAV:2002,
author = "Michael A. Col{\'o}n and Henny B. Sipma",
title = "Practical Methods for Proving Program Termination",
crossref = "CAV:2002",
pages = "442--454",
abstract = "{ We present two algorithms to prove termination of
programs by synthesizing linear ranking functions. The
first uses an invariant generator based on iterative
forward propagation with widening, and extracts ranking
functions from the generated invariants by manipulating
polyhedral cones. It is capable of finding subtle
ranking functions which are linear combinations of many
program variables, but is limited to short programs
with few variables. \\
The second, more heuristic, algorithm targets the class
of structured programs with single-variable ranking
functions. Its invariant generator uses a heuristic
extrapolation operator to avoid iterative forward
propagation over program loops. For the programs we
have considered, this approach converges faster and the
invariants it discovers are sufficiently strong to
imply the existence of ranking functions.}",
}
@InProceedings{Paper12:CAV:2002,
author = "Li Tan and Rance Cleaveland",
title = "Evidence-Based Model Checking",
crossref = "CAV:2002",
pages = "455--470",
abstract = "{ This paper shows that different
``meta-model-checking'' analyses can be conducted
efficiently on a generic data structure we call a
\emph{support set}. Support sets may be viewed as
abstract encodings of the ``evidence'' a model checker
uses to justify the yes/no answers it computes. We
indicate how model checkers may be modified to compute
support sets without compromising their time or space
complexity. We also show how support sets may be used
for a variety of different analyses of model-checking
results, including: the generation of diagnostic
information for explaining negative model-checking
results; and certifying the results of model checking
(is the evidence internally consistent?).}",
}
@InProceedings{Paper18:CAV:2002,
author = "Gianpiero Cabodi and Sergio Nocco and Stefano Quer",
title = "Mixing Forward and Backward Traversals in
Guided-Prioritized {BDD}-Based Verification",
crossref = "CAV:2002",
pages = "471--484",
abstract = "{ Over the last decade BDD-based symbolic
manipulations have been among the most widely used core
technologies in the verification domain and various
methodologies have been proposed to improve their
efficiency. Following some of the most successful
trends proposed in this field, we present in this paper
a very promising approach to solve Unbounded Model
Checking problems. \\
Our approach is based on: Mixing forward and backward
traversals, dovetailing approximate and exact methods,
adopting guided and partitioned searches, efficiently
using conjunctive decompositions and generalized
cofactor based BDD simplifications. A major
contribution of this paper is a backward verification
procedure based on a prioritized traversal, that we
call ``inbound-path-search''. The method consists in
partitioning state sets in terms of the estimated
distance from the ``target'' set of states, and giving
higher priority in the search procedure to the subset
with smallest estimated distance. An initial
approximate forward traversal produces
over-approximated onion-ring frontier sets that are
used as distance estimators and guides for prioritized
backward traversal. The resulting method is exact and
it does not produce any false negatives. \\
We experimentally compare this methodology with
approximate-reachability don't cares in model checking,
a state-of-the-art BDD-based technique (implemented in
the freely available VIS tool) combining approximate
and exact search. We show that we are able to
accomplish verification tasks outside its present
scope.}",
}
@InProceedings{Paper42:CAV:2002,
author = "Mitra Purandare and Fabio Somenzi",
title = "Vacuum Cleaning {CTL} Formulae",
crossref = "CAV:2002",
pages = "485--499",
abstract = "{ Vacuity detection in model checking looks for
properties that hold in a model, and can be
strengthened without causing them to fail. Such
properties often signal problems in the model, its
environment, or the properties themselves. The seminal
paper of Beer et al. proposed an efficient algorithm
applicable to a restricted set of properties.
Subsequently, Kupferman and Vardi extended vacuity
detection to more expressive specification mechanisms.
They advocated a more minute examination of temporal
logic formulae than the one adopted in [Beer97].
However, they did not address the issues of
practicality and usefulness of this more scrupulous
inspection. In this paper we discuss efficient
algorithms for the detection of vacuous passes of
temporal logic formulae, showing that a thorough
vacuity check for CTL formulae can be carried out with
very small overhead, and even, occasionally, in less
time than plain model checking. We also demonstrate the
usefulness of such a careful analysis with the help of
case studies.}",
}
@InProceedings{Paper1:CAV:2002,
author = "Aaron Stump and Clark W. Barrett and David L. Dill",
title = "{CVC}: a Cooperating Validity Checker",
crossref = "CAV:2002",
pages = "500--504",
abstract = "{ Decision procedures for decidable logics and logical
theories have proven to be useful tools in
verification. This paper describes the CVC (``a
Cooperating Validity Checker'') decision procedure. CVC
implements a framework for combining subsidiary
decision procedures for certain logical theories into a
decision procedure for the theories' union. Subsidiary
decision procedures for theories of arrays, inductive
datatypes, and linear real arithmetic are currently
implemented. Other notable features of CVC are the
incorporation of the high-performance GRASP solver for
propositional reasoning, and the ability to produce
independently checkable proofs for valid formulas.}",
}
@InProceedings{Paper3:CAV:2002,
author = "Marsha Chechik and Benet Devereux and Arie Gurfinkel",
title = "{$\xi$Chek}: {A} Multi-Valued Model-Checker",
crossref = "CAV:2002",
pages = "505--509",
abstract = "{ This paper describes a symbolic multi-valued
model-checker XChek.}",
}
@InProceedings{Paper7:CAV:2002,
author = "Shoham Ben-David and Anna Gringauze and Baruch Sterin
and Yaron Wolfsthal",
title = "{PathFinder}: {A} Tool for Design Exploration",
crossref = "CAV:2002",
pages = "510--514",
abstract = "{ In this paper we present a tool called PathFinder,
which exploits the power of model checking for
developing and debugging newly-written hardware
designs. Our tool targets the community of design
engineers, who---in contrast to verification
engineers---are not versed in formal verification, and
therefore have traditionally been distant from the
growing industry momentum in the area of model
checking. \\
PathFinder provides a means for the designer to
explore, debug and gain insight into the behaviors of
the design at a very early stage of the
implementation---even before their design is complete. In
the usage paradigm enabled by PathFinder, which we call
Design Exploration, the design engineer specifies a
behavior of interest, and the tool then finds and
demonstrates---graphically---a set of execution traces
compliant with the specified behavior, if any exist.
When presented with each such execution sequence, the
designer is essentially furnished with an insight into
the design behavior, and specifically with an example
of a concrete scenario in which the behavior of
interest occurs. This scenario can then be closely
inspected, refined, or abandoned in favor of another
scenario.}",
}
@InProceedings{Paper16:CAV:2002,
author = "Dennis Dams and William Hesse and Gerard J. Holzmann",
title = "Abstracting {C} with {abC}",
crossref = "CAV:2002",
pages = "515--520",
abstract = "{ A conceptually simple and practically very useful
form of data abstraction in model checking is variable
hiding, which amounts to suppressing all information
about a given set of variables. The abC tool automates
this for programs written in the C programming
language. It features an integrated demand-driven
pointer analysis, and has been implemented as an
extension of GCC.}",
}
@InProceedings{Paper24:CAV:2002,
author = "Alex Groce and Doron A. Peled and Mihalis Yannakakis",
title = "{AMC}: An Adaptive Model Checker",
crossref = "CAV:2002",
pages = "521--525",
abstract = "{ The AMC (for adaptive model checking) system allows
one to perform model checking directly on a system,
even when its internal structure is unknown or
invisible. It also allows one to perform model checking
using an inaccurate model, incrementally improving the
model each time that a false negative (i.e., not an
actual) counterexample is found.}",
}
@InProceedings{Paper39:CAV:2002,
author = "Thomas A. Henzinger and Ranjit Jhala and Rupak
Majumdar and George C. Necula and Gr{\'e}goire Sutre
and Westley Weimer",
title = "Temporal-Safety Proofs for Systems Code",
crossref = "CAV:2002",
pages = "526--538",
abstract = "{ We present a methodology and tool for verifying and
certifying systems code. The verification is based on
the lazy-abstraction paradigm for intertwining the
following three logical steps: construct a predicate
abstraction from the code, model check the abstraction,
and automatically refine the abstraction based on
counterexample analysis. The certification is based on
the proof-carrying code paradigm. Lazy abstraction
enables the automatic construction of small proof
certificates. The methodology is implemented in Blast,
the Berkeley Lazy Abstraction Software Verification
Tool. We describe our experience applying Blast to
Linux and Windows device drivers and we describe how
error traces are produced for erroneous drivers and
easily checkable correctness certificates are produced
for the correct drivers.}",
}
@InProceedings{Paper25:CAV:2002,
author = "Ahmed Bouajjani and Tayssir Touili",
title = "Extrapolating Tree Transformations",
crossref = "CAV:2002",
pages = "539--554",
abstract = "{ We consider the framework of {\em regular tree model
checking} where sets of configurations of a system are
represented by regular tree languages and its dynamics
is modeled by a term rewriting system (or a regular
tree transducer). We focus on the computation of the
reachability set $R^*(L)$ where $R$ is a regular tree
transducer and $L$ is a regular tree language. The
construction of this set is not possible in general.
Therefore, we present a general acceleration technique,
called {\em regular tree widening} which allows to
speed up the convergence of iterative fixpoint
computations in regular tree model checking. This
technique can be applied uniformly to various kinds of
transformations. \\
We show the application of our framework to different
analysis contexts: verification of parametrized tree
networks and data-flow analysis of multithreaded
programs. Parametrized networks are modeled by
relabeling tree transducers, and multithreaded programs
are modeled by term rewriting rules encoding
transformations on control structures. \\
We prove that our widening technique can emulate many
existing algorithms for special classes of
transformations and we show that it can deal with
transformations beyond the scope of these
algorithms.}",
}
@InProceedings{Paper28:CAV:2002,
author = "Parosh Aziz Abdulla and Bengt Jonsson and Pritha
Mahata and Julien d'Orso",
title = "Regular Tree Model Checking",
crossref = "CAV:2002",
pages = "555--568",
abstract = "{ In this paper, we present an approach for
algorithmic verification of infinite-state systems with
a parameterized tree topology. Our work is a
generalization of regular model checking, where we
extend the work done with strings toward trees. States
are represented by trees over a finite alphabet, and
transition relations by regular, structure preserving
relations on trees. We use an automata theoretic method
to compute the transitive closure of such a transition
relation. Although the method is incomplete, we present
sufficient conditions to ensure termination. \\
We have implemented a prototype for our algorithm and
show the result of its application on a number of
examples.}",
}
@InProceedings{Paper37:CAV:2002,
author = "Robert Kurshan and Vladimir Levin and Husnu Yenigun",
title = "Compressing Transitions for Model Checking",
crossref = "CAV:2002",
pages = "569--581",
abstract = "{ An optimization technique is presented that
compresses a chain of transitions into a single jump
transition, thus making a model smaller prior to model
checking. We give compression algorithms, together with
conditions that allow such compressions to preserve
next-time-free LTL. Experimental results are presented
and discussed. \\
Keywords: model checking, partial order reduction}",
}
@InProceedings{Paper4:CAV:2002,
author = "Victor Khomenko and Maciej Koutny and Walter Vogler",
title = "Canonical Prefixes of {Petri} Net Unfoldings",
crossref = "CAV:2002",
pages = "582--595",
abstract = "{ In this paper, we develop a general technique for
truncating Petri net unfoldings, parameterized
according to the level of information about the
original unfolding one wants to preserve. Moreover, we
propose a new notion of completeness of a truncated
unfolding. A key aspect of our approach is an
algorithm-independent notion of cut-off events, used to
truncate a Petri net unfolding. Such a notion is based
on a cutting context and results in the unique
canonical prefix of the unfolding. Canonical prefixes
are complete in the new, stronger sense, and we provide
necessary and sufficient conditions for its finiteness,
as well as upper bounds on its size in certain cases. A
surprising result is that after suitable
generalization, the standard unfolding algorithm
presented in [Esparza, Roemer and Vogler: An
Improvement of McMillan's Unfolding Algorithm. Proc.
TACAS'96] and the parallel unfolding algorithm proposed
in [Heljanko, Khomenko and Koutny: Parallelisation of
the Petri Net Unfolding Algorithm. Proc. of TACAS'02],
despite being non-deterministic, generate the canonical
prefix. This gives an alternative correctness proof for
the former algorithm, and a new (much simpler) proof
for the latter one.}",
}
@InProceedings{Paper8:CAV:2002,
author = "Stefan Blom and Jaco van de Pol",
title = "State Space Reduction by Proving Confluence",
crossref = "CAV:2002",
pages = "596--609",
abstract = "{ We present a modular method for on-the-fly state
space reduction. The theoretical foundation of the
method is a new confluence notion for labeled
transitions systems. The method works by adding
confluence information to the symbolic representation
of the state space. We present algorithms for
on-the-fly exploration of the reduced state space, for
generating confluence information and for a symbolic
reduction, called prioritization. The latter two
algorithms rely on an automated theorem prover to
derive the necessary information. We also present some
case studies in which tools that implement these
algorithms were used.}",
}
@InProceedings{Paper43:CAV:2002,
author = "Sankar Gurumurthy and Roderick Bloem and Fabio
Somenzi",
title = "Fair Simulation Minimization",
crossref = "CAV:2002",
pages = "610--623",
abstract = "{ We present an algorithm for the minimization of
B{\"u}chi automata based on the notion of \emph{fair
simulation} introduced in [HKR97]. Unlike direct
simulation, fair simulation allows flexibility in the
satisfaction of the acceptance conditions, and hence
leads to larger relations. However, it is not always
possible to remove edges to simulated states or merge
simulation equivalent states without altering the
language of the automaton. Solutions proposed in the
past consisted in checking sufficient conditions [SB00,
Theorem 3], or resorting to more restrictive notions
like delayed simulation [EWS01]. By contrast, our
algorithm exploits the full power of fair simulation by
efficiently checking the correctness of changes to the
automaton (both mergers of states and removal of
edges).}",
}
@Proceedings{CAV:2002,
editor = "Ed Brinksma and Kim Guldstrand Larsen",
title = "Computer Aided Verification",
booktitle = "Computer Aided Verification",
conference = "14th International Conference",
key = "CAV",
year = "2002",
month = jul # " 27--31",
venue = "Copenhagen, Denmark",
series = "Lecture Notes in Computer Science",
volume = "2404",
publisher = "Springer-Verlag",
ISBN = "3-540-43997-8",
ISSN = "0302-9743",
}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CICLOPS - Colloquium on Implementation of Constraint and {LO}gic Programming Systems
% ===================================================================
@InProceedings{Schrijvers:CICLOPS:2002,
author = "Tom Schrijvers",
title = "Combining an improvement to {PARMA} trailing with
analysis in {HAL}",
crossref = "CICLOPS:2002",
pages = "1--12",
abstract = "Trailing of bindings in the PARMA variable
representation is expensive in time and space. Two
schemes are presented that lower its cost: the first is
a technique that halves the space cost of trailing in
PARMA. It can be used with conditional and
unconditional trailing. It is illustrated and evaluated
in the context of dProlog and in the Mercury backend of
HAL. The second scheme combines a variant of a
previously developed trailing analysis with the first
technique. Empirical evidence shows the usefulness of
these schemes and that the combination is more
effective than each scheme apart.",
}
@InProceedings{FerreiraDamas:CICLOPS:2002,
  author   = "Michel Ferreira and Lu{\'\i}s Damas",
  title    = "{WAM} Local Analysis",
  crossref = "CICLOPS:2002",
  pages    = "13--25",
  abstract = "The abstract interpretation framework has been used
              mainly in the \emph{global} analysis of programs. Most
              often also, this interpretation is applied to the
              source Prolog program. In this paper we present an
              abstract interpretation of more {\em local} nature, and
              applied to the intermediate code (WAM). The purpose of
              obtaining a more efficient specialized version of the
              program remains the same as in global analysis
              approaches. Our specialization is multiple, meaning
              that we generate a different version for each entry
              pattern detected by analysis. This poly-variant
              unfolding of predicates allows the local (predicate
              level) analysis to propagate inter-procedurally
              relevant information. Besides time and complexity
              reduction of local versus global analysis, our approach
              is suited for {\em goal-independent} specialization,
              and for the partial selection of predicates to
              specialize. The evaluation of this more general
              specialization of programs in a full compiler shows
              that it is an alternative to global and goal-dependent
              methods.",
}
@InProceedings{DouenceJussien:CICLOPS:2002,
author = "R{\'e}mi Douence and Narendra Jussien",
title = "Non-intrusive constraint solver enhancements",
crossref = "CICLOPS:2002",
pages = "26--36",
abstract = "Using conflict sets (or nogoods) and explanations
within constraint programming has been proved very
effective. However, most constraint solvers do not
provide this feature. This statement could have been
made for many other improvements. Indeed, one of the
main reasons of that fact is that many improvements in
constraint programming are \textbf{intrusive}: their
integration requires a general modification of the
solvers' implementation and/or architecture. The core
part of constraint solvers is often quite simple,
however, it represents only a small part of the
implementation. The main part of the code is devoted to
specific constraint handling, global constraints,
search techniques, API, etc. Modifying this code
requires a real development effort that may become
overly costly. Constraint solvers need non intrusive
approaches. Actually, solvers should not be modified at
all and only a general information about implementation
should be needed to integrate improvements. In this
paper, we present a technique used in software
engineering to reach that aim: aspect oriented
programming. As an example, the non intrusive
integration of conflict set generation and use is
presented and some insights of what could be done are
provided.",
}
@InProceedings{PinedaBueno:CICLOPS:2002,
author = "Angel Pineda and Francisco Bueno",
title = "The {O'Ciao} Approach to {OO} {LP}",
crossref = "CICLOPS:2002",
pages = "37--48",
abstract = "There have been quite a number of proposals for the
integration of Object Oriented Programming features
into Logic Programming, resulting in much support
theory and several languages. However, none of these
proposals seem to have made it into the mainstream.
Perhaps one of the reasons for this is that the
resulting languages depart too much from the standard
logic programming languages to entice the average
Prolog programmer. Another reason may be that most of
what can be done with object-oriented programming can
already be done in Prolog through the meta- and
higher-order programming facilities that the language
includes, albeit sometimes in a more cumbersome way. In
light of this, in this paper we propose an alternative
solution which is driven by two main objectives. The
first one is to explicitly include at the language
level only those characteristics of object-oriented
programming which are cumbersome to implement in
standard Prolog systems. The second one is to do this
in such a way that there is a minimum impact on the
syntax and complexity of the language, i.e., to
introduce the minimum number of new constructs and
concepts. Finally, we would also like the
implementation to be as straightforward as possible,
ideally based on simple source to source expansions.",
}
@InProceedings{CarroHermenegildo:CICLOPS:2002,
author = "Manuel Carro and Manuel Hermenegildo",
title = "A Simple Approach to Distributed Objects in {P}rolog",
crossref = "CICLOPS:2002",
pages = "49--61",
abstract = "We present the design of a distributed object systems
for Prolog, based on adding remote execution and
distribution capabilities to a previously existing
object system. Remote execution brings RPC into a
Prolog system, and its semantics are easy to express in
terms of well-known Prolog builtins. The final
distributed object design features state mobility and a
user-transparent network behavior. We sketch an
implementation which provides distributed garbage
collection and some degree of resilience to network
failures. We provide a preliminary study of the
overhead of the communication mechanism for some test
cases.",
}
@InProceedings{GuoGupta:CICLOPS:2002,
  author   = "Hai-Feng Guo and Gopal Gupta",
  title    = "Cuts in Tabled Logic Programming",
  crossref = "CICLOPS:2002",
  pages    = "62--73",
  abstract = "Tabled logic programming (LP) systems alter the
              operational semantics of Prolog since they employ
              dynamic computation rules that are different from SLD
              resolution. This also requires changes to the
              operational semantics of cuts in tabled LP systems. In
              this paper we explore the incorporation of cuts in
              tabled logic programming systems, in particular, those
              based on our dynamic reordering of alternatives (DRA)
              technique. We present a reasonable operational
              semantics to cuts for DRA based systems and describe
              its implementation in the TALS tabled Prolog system. We
              also address the issues and problems that arise in
              handling cuts faced by other tabled Prolog systems. We
              argue that DRA based systems have an operational
              semantics closer to Prolog's, and therefore support
              cuts easily.",
}
@InProceedings{CastroCostaLopes:CICLOPS:2002,
author = "Luis F. Castro and Vitor S. Costa and Ricardo Lopes",
title = "On the Cache Performance of {P}rolog Systems",
crossref = "CICLOPS:2002",
pages = "74--85",
abstract = "One critical issue in the design of declarative
languages is their memory performance, both in terms of
total memory usage and locality in memory accesses.
Early studies of Prolog programs have shown the
language to have excellent characteristics in this
regard. In this work we study the memory performance of
two modern Prolog systems through detailed
instruction-level simulation. Our work aims at
addressing several important questions in the
implementation of logic programming systems. Through
comparative analysis, we can study to what extent
memory performance depends on the WAM design and on
actual implementation. We study how deterministic and
non-deterministic programs fare comparatively regarding
locality. And we evaluate examples of larger Prolog
applications that do rely extensively on built-ins. One
issue of great importance we address in this work is
the impact of garbage collection. We study the cache
impact of running the garbage collector, and how it
affects system efficiency. Our results show that
garbage collection can both be very helpful to system
performance, or damage cache-performance significantly,
and explain why.",
}
@Proceedings{CICLOPS:2002,
title = "Colloquium on Implementation of Constraint and {LO}gic
Programming Systems",
booktitle = "Colloquium on Implementation of Constraint and {LO}gic
Programming Systems",
key = "CICLOPS",
year = "2002",
month = jul # " 31",
venue = "Copenhagen, Denmark",
series = "CW Reports",
volume = "344",
institution = "Department of Computer Science, K.U.Leuven",
address = "Leuven, Belgium",
URL = "http://www.cs.kuleuven.ac.be/publicaties/rapporten/cw/CW344.abs.html",
}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CLIMA - Computational Logic in Multi-Agent Systems
% ==========================================
@InProceedings{SchroederSchweimeier:CLIMA:2002,
author = "Michael Schroeder and Ralf Schweimeier",
title = "Arguments and Misunderstandings: Fuzzy Unification for
Negotiating Agents",
crossref = "CLIMA:2002",
pages = "1--18",
abstract = "In this paper, we develop the notion of fuzzy
unification and incorporate it into a novel fuzzy
argumentation framework for extended logic programming.
We make the following contributions: The argumentation
framework is defined by a declarative bottom-up
fixpoint semantics and an equivalent goal-directed
top-down proof-procedure for extended logic
programming. Our framework allows one to represent
positive and explicitly negative knowledge, as well as
uncertainty. Both concepts are used in agent
communication languages such as KQML and FIPA ACL. One
source of uncertainty in open systems stems from
mismatches in parameter and predicate names and missing
parameters. To this end, we conservatively extend
classical unification and develop fuzzy unification
based on normalised edit distance over trees.",
}
@InProceedings{LeiteAlferesPereiraPrzymusinskaPrzymusinski:CLIMA:2002,
author = "Jo{\~a}o Alexandre Leite and Jos{\'e} J{\'u}lio
Alferes and Lu{\'\i}s Moniz Pereira and Halina
Przymusinska and Teodor C. Przymusinski",
title = "A Language for Multi-dimensional Updates",
crossref = "CLIMA:2002",
pages = "19--34",
abstract = "Dynamic Logic Programming (DLP) was introduced to deal
with knowledge about changing worlds, by assigning
semantics to sequences of generalized logic programs,
each of which represents a state of the world. These
states permit the representation, not only of time, but
also of specificity, strength of updating instance,
hierarchical position of the knowledge source, etc.
Subsequently, the Language of Updates LUPS was
introduced to allow for the association, with each
state, of a set of transition rules. It thereby
provides for an interleaving sequence of states and
transition rules within an integrated declarative
framework. DLP (and LUPS), because defined only for a
linear sequence of states, cannot deal simultaneously
with more than a single dimension (e.g. time,
hierarchies,...). To overcome this limitation,
Multi-dimensional Dynamic Logic Programming (MDLP) was
therefore introduced, so as to make it possible to
organize states into arbitrary acyclic digraphs (DAGs).
In this paper we now extend LUPS, setting forth a
Language for Multi-dimensional Updates (MLUPS). MLUPS
admits the specification of flexible evolutions of such
DAG organized logic programs, by allowing not just the
specification of the logic programs representing each
state, but to the evolution of the DAG topology itself
as well.",
}
@InProceedings{KakasMoraitis:CLIMA:2002,
author = "Antonis C. Kakas and Pavlos Moraitis",
title = "Argumentative Agent Deliberation, Roles and Context",
crossref = "CLIMA:2002",
pages = "35--48",
abstract = "This paper presents an argumentation based framework
to support an agent's deliberation process for drawing
conclusions under a given policy. The argumentative
policy of the agent is able to take into account the
roles agents can have within a context pertaining to an
environment of interaction.",
}
@InProceedings{ToyamaKojimaInagaki:CLIMA:2002,
  author   = "Katsuhiko Toyama and Takahiro Kojima and Yasuyoshi
              Inagaki",
  title    = "Translating Multi-Agent Autoepistemic Logic into Logic
              Program",
  crossref = "CLIMA:2002",
  pages    = "49--62",
  abstract = "Multi-agent autoepistemic Logic (MAEL) is a natural
              framework to formalize beliefs and reasoning including
              inheritance, persistence, and causality. To develop a
              proof procedure of it, we introduce a method that
              translates a MAEL theory into a logic program with
              integrity constraints. We also investigate relations
              between MAEL and other nonmonotonic reasonings.",
}
@InProceedings{DellAcquaNilssonPereira:CLIMA:2002,
author = "Pierangelo Dell'Acqua and Ulf Nilsson and Lu{\'\i}s
Moniz Pereira",
title = "A Logic Based Asynchronous Multi-Agent System",
crossref = "CLIMA:2002",
pages = "63--78",
abstract = "We present a logic programming based asynchronous
multi-agent system in which agents can: communicate
with one another; update themselves and each other;
eliminate contradictory update rules; abduce hypotheses
to explain observations, and use them to generate
actions. The knowledge base of the agents is comprised
of generalized logic programs, integrity constraints,
active rules, and of abducibles. We characterize the
interaction among agents via an asynchronous transition
rule system, and provide a stable models based
semantics. An example is developed to illustrate how
our approach functions.",
}
@InProceedings{HarlandWinikoff:CLIMA:2002,
author = "James Harland and Michael Winikoff",
title = "Language Design Issues for Agents based on Linear
Logic (Extended Abstract)",
crossref = "CLIMA:2002",
pages = "79--93",
abstract = "Agent systems based on the {\em Belief}, {\em Desire}
and {\em Intention} model of Rao and Georgeff have been
used for a number of successful applications. However,
it is often difficult to learn how to apply such
systems, due to the complexity of both the semantics of
the system and the computational model. In addition,
there is a gap between the semantics and the concepts
that are presented to the programmer. One way to bridge
this gap is to re-cast the foundations of such systems
into a logic programming framework. In particular, the
integration of backward- and forward-chaining
techniques for linear logic provides a natural starting
point for this investigation. In this paper we discuss
the language design issues for such a system, and
particularly the way in which the potential choices for
rule evaluation in a forward-chaining manner is crucial
to the behaviour of the system.",
}
@InProceedings{BordiniMoreira:CLIMA:2002,
author = "Rafael H. Bordini and {\'A}lvaro F. Moreira",
title = "Proving the Asymmetry Thesis Principles for a {BDI}
Agent-Oriented Programming Language",
crossref = "CLIMA:2002",
pages = "94--108",
abstract = "In this paper, we consider each of the nine principles
of BDI logics as defined by Rao and Georgeff based on
Bratman's asymmetry thesis, and we verify which ones
are satisfied by Rao's AgentSpeak(L), a computable
logic language inspired by the BDI architecture for
cognitive agents. This is in line with Rao's original
motivation for defining AgentSpeak(L): to bridge the
gap between theory and practice of BDI agent systems.
In order to set the grounds for the proof, we first
introduce a particular way in which to define the
informational, motivational, and deliberative
modalities of BDI logics for AgentSpeak(L) agents,
according to its structural operational semantics (that
we introduced in a recent paper). This provides a
framework that can be used to investigate further
properties of AgentSpeak(L) agents, contributing
towards giving firm theoretical grounds for BDI agent
programming.",
}
@InProceedings{AraragiTakataNaoyuki:CLIMA:2002,
author = "Tadashi Araragi and Shiro Takata and Naoyuki Nide",
title = "A Verification Method for a Commitment Strategy of the
{BDI} Architecture",
crossref = "CLIMA:2002",
pages = "109--122",
abstract = "We present a method to solve a verification problem
that arises in implementing a commitment strategy for
the BDI architecture. This problem introduces a new
aspect of verification such that a state transition
depends on a verification done at each state. We
formalize this problem and give a decision procedure
for the verification.",
}
@InProceedings{NideTakataAraragi:CLIMA:2002,
author = "Naoyuki Nide and Shiro Takata and Tadashi Araragi",
title = "Deduction Systems for {BDI} Logics with Mental State
Consistency",
crossref = "CLIMA:2002",
pages = "123--135",
abstract = "BDI Logics, introduced by Rao et al., have been used
as the theoretical basis of specification and
implementation of rational agents. The aim of our
research is to make full use of the expressive power of
BDI Logics as executable specification languages of
rational agents. To this end, we previously presented
deduction systems for CTL-based propositional BDI
Logics using sequent calculus. Since these systems have
a decision algorithm that is extended from Wang's
algorithm, they are suitable for applications such as
automatic proving. However, they do not incorporate
mental state consistency features, which are important
for dealing with rational agents. In this paper, we
extend our deduction systems by introducing mental
state consistency features and explain their soundness
and completeness. This approach allows us to check and
prove the specifications and properties described by
BDI Logics for rational agents.",
}
@InProceedings{HayashiChoOhsuga:CLIMA:2002,
author = "Hisashi Hayashi and Kenta Cho and Akihiko Ohsuga",
title = "Speculative Computation and Action Execution in
Multi-Agent Systems",
crossref = "CLIMA:2002",
pages = "136--148",
abstract = "In some multi-agent systems, when an agent cannot
retrieve information from another agent, the agent
makes an assumption and tentatively performs the
computation. When the agent comes across a mistake in
the preliminary assumption, the computation is
modified. This kind of speculative computation is
effective when the assumption is correct. However, once
the agent executes an action, it is impossible to
modify the computation in these systems. This paper
shows how to integrate speculative computation and
action execution through logic programming.",
}
@InProceedings{IwanumaInoue:CLIMA:2002,
  author   = "Koji Iwanuma and Katsumi Inoue",
  title    = "Conditional Answer Computation in {SOL} as Speculative
              Computation in Multi-Agent Environments",
  crossref = "CLIMA:2002",
  pages    = "149--162",
  abstract = "In this paper, we study speculative computation in a
              master-slave multi-agent system where reply messages
              sent from slave agents to a master are always tentative
              and may change from time to time. In this system,
              default values used in speculative computation are only
              partially determined in advance. Inoue {\it et
              al.}~[2001] formalized speculative computation in such
              an environment with tentative replies, using the
              framework of a first-order consequence-finding
              procedure SOL with the well-known answer literal
              method. We shall further refine SOL calculus, using
              {\em conditional answer} computation and {\em
              skip-preference\/} in SOL. The conditional answer
              format has a great advantage of explicitly
              representing the dependency relation to tentative
              replies and defaults which are used to derive a
              conclusion. The dependency representation is
              significantly important to avoid unnecessary
              recomputation of tentative conclusions. The
              skip-preference has the great ability of preventing
              irrational/redundant derivations. Finally, we show an
              incremental answer computation method within the SOL
              tableau calculus.",
}
@InProceedings{Bolander:CLIMA:2002,
author = "Thomas Bolander",
title = "Maximal Introspection of Agents",
crossref = "CLIMA:2002",
pages = "163--176",
abstract = "This paper concerns syntactical representations of
introspective belief and knowledge in multi-agent
systems. It is well-known that reasoning frameworks for
introspective beliefs easily become inconsistent as a
consequence of the presence of paradoxical
self-reference. In the paper we explore the maximal
sets of introspective beliefs that an agent can
consistently obtain and retain. Hereby some previous
results by Perlis [1985] and Rivi{\`e}re {\&} Levesque
[1988] are generalized.",
}
@InProceedings{Torroni:CLIMA:2002,
author = "Paolo Torroni",
title = "Logics and Multi-agents: towards a new symbolic model
of cognition",
type = "Panel Discussion Abstract",
crossref = "CLIMA:2002",
pages = "177--180",
abstract = "In the last years, Computational Logic proved to be a
successful approach to several aspects of Multi-Agent
Systems design. Some examples of it are logic
programming-based agent reasoning and model
checking-based verification techniques, applied to
agents and agent systems. At the same time, from the
Computational Logic side we are witnessing a growth in
the interest for Multi-Agent Systems. Some recent
directions of research seem to push towards a new idea
of intelligent systems, and the metaphor of intelligent
individuals that are situated into dynamic environments
and that can interact with each other, updating their
mutual beliefs, is being regarded as the basis for a
new symbolic model of cognition. It is our intention to
propose some open questions about this new perspective
to warm up a discussion panel for CLIMA'02. It is our
belief that answers to them could foster a significant
advance in both the Multi-Agent Systems and
Computational Logic research of the next years.",
}
@Proceedings{CLIMA:2002,
editor = "J{\"u}rgen Dix and Jo{\~a}o Alexandre Leite and Ken
Satoh",
title = "Computational Logic in Multi-Agent Systems",
booktitle = "Computational Logic in Multi-Agent Systems",
conference = "3rd International Workshop",
key = "CLIMA",
pages = "x + 181",
year = "2002",
month = aug # " 1",
venue = "Copenhagen, Denmark",
series = "Datalogiske Skrifter",
volume = "93",
institution = "Roskilde University",
address = "Roskilde, Denmark",
ISSN = "0109-9779",
}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CiAD - Complexity in Automated Deduction
% =================================
% NOTE: the CiAD proceedings key was "CiAD:20002" (typo, extra zero);
% fixed here to "CiAD:2002" to match the file-wide <MEETING>:<year>
% convention (cf. CAV:2002, CLIMA:2002, CICLOPS:2002). All crossrefs
% below are updated in the same way.
@InProceedings{Cadoli:CiAD:2002,
  author   = "Marco Cadoli",
  title    = "The expressive power of Binary Linear Programming",
  type     = "Invited talk",
  crossref = "CiAD:2002",
  pages    = "(unpaginated)",
}
@InProceedings{Gradel:CiAD:2002,
  author   = "Achim Blumensath and Erich Gr{\"a}del",
  title    = "Finite Presentations of Infinite Structures: Automata
              and Interpretation",
  type     = "Invited talk",
  crossref = "CiAD:2002",
  pages    = "(unpaginated)",
}
@InProceedings{Pichler:CiAD:2002,
  author   = "Reinhard Pichler",
  title    = "Algorithms and Complexity of Model Representations",
  type     = "Invited talk",
  crossref = "CiAD:2002",
  pages    = "(unpaginated)",
}
@InProceedings{Stuber:CiAD:2002,
  author   = "Manfred Schmidt-Schau{\ss} and J{\"u}rgen Stuber",
  title    = "On the complexity of linear and stratified context
              matching problems",
  crossref = "CiAD:2002",
  pages    = "(unpaginated)",
}
@InProceedings{Durand:CiAD:2002,
  author   = "Arnaud Durand and Miki Hermann",
  title    = "The inference problem for propositional
              circumscription of affine formulas is {coNP}-complete",
  crossref = "CiAD:2002",
  pages    = "(unpaginated)",
}
@InProceedings{Berwanger:CiAD:2002,
  author   = "Dietmar Berwanger and Erich Gr{\"a}del",
  title    = "Fixed point formulae and solitaire games",
  crossref = "CiAD:2002",
  pages    = "(unpaginated)",
}
@InProceedings{Teran:CiAD:2002,
  author   = "Oswaldo Ter{\'a}n and Bruce Edmonds",
  title    = "Computational complexity of a constraint model-based
              proof of the envelope of tendencies in a {MAS}-based
              simulation model",
  crossref = "CiAD:2002",
  pages    = "(unpaginated)",
}
@Proceedings{CiAD:2002,
  editor      = "Georg Gottlob and Miki Hermann and Micha{\"e}l
                 Rusinowitch",
  title       = "Complexity in Automated Deduction",
  booktitle   = "Complexity in Automated Deduction",
  conference  = "Second International Workshop",
  key         = "CiAD",
  year        = "2002",
  month       = jul # " 25--26",
  venue       = "Copenhagen, Denmark",
  series      = "DIKU technical reports",
  volume      = "02-08",
  institution = "University of Copenhagen, Dept.~of Computer Science",
  ISSN        = "0107-8283",
}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% EFSD - Evolutionary Formal Software Development
% ========================================
@InProceedings{Kestrel:EFSD:2002,
author = "Matthias Anlauff and Dusko Pavlovic and Douglas R.
Smith",
title = "Title {TBA}",
type = "Invited Talk",
crossref = "EFSD:2002",
pages = "7--18",
}
@InProceedings{AndradeFiadeiro:EFSD:2002,
author = "Lu{\'\i}s Andrade and Jos{\'e} Luiz Fiadeiro",
title = "Coordination Primitives for Evolving Event-based
Systems",
crossref = "EFSD:2002",
pages = "19--29",
}
@InProceedings{Schairer:EFSD:2002,
author = "Axel Schairer",
title = "Proof Transformations for Reusing Proofs after
Changing Subformulae of Verification Conditions",
crossref = "EFSD:2002",
pages = "31--41",
}
@InProceedings{Groves:EFSD:2002,
author = "Lindsay Groves",
title = "A Formal Approach to Program Evolution",
crossref = "EFSD:2002",
pages = "43--54",
}
@InProceedings{Baumeister:EFSD:2002,
author = "Hubert Baumeister",
title = "Formal Methods and Extreme Programming",
crossref = "EFSD:2002",
pages = "55--56",
}
@InProceedings{PiccininiScollo:EFSD:2002,
author = "Nicola Piccinini and Giuseppe Scollo",
title = "The Visual Proof Manipulation Project {(VPM)}",
crossref = "EFSD:2002",
pages = "57--59",
}
@Proceedings{EFSD:2002,
  editor      = "Dieter Hutter and David Basin and Peter Lindsay and
                 Christoph L{\"u}th",
  title       = "Evolutionary Formal Software Development",
  booktitle   = "Evolutionary Formal Software Development",
  conference  = "First Workshop",
  key         = "EFSD",
  pages       = "59",
  year        = "2002",
  month       = jul # " 21",
  venue       = "Copenhagen, Denmark",
  series      = "DIKU technical reports",
  volume      = "02-11",
  institution = "University of Copenhagen, Dept.~of Computer Science",
  ISSN        = "0107-8283",
}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% FCS - Foundations of Computer Security
% ================================
@InProceedings{Kuster:FCS:2002,
author = "Ralf K{\"u}sters",
title = "On the Decidability of Cryptographic Protocols with
Open-ended Data Structures",
crossref = "FCS:2002",
pages = "3--12",
abstract = "Formal analysis of cryptographic protocols has mainly
concentrated on protocols with closed-ended data
structures, where closed-ended data structure means
that the messages exchanged between principals have
fixed and finite format. However, in many protocols the
data structures used are open-ended, i.e., messages
have an unbounded number of data fields. Formal
analysis of protocols with open-ended data structures
is one of the challenges pointed out by Meadows. This
work studies decidability issues for such protocols. We
propose a protocol model in which principals are
described by transducers, i.e., finite automata with
output, and show that in this model security is
decidable and PSPACE-hard in presence of the standard
Dolev-Yao intruder.",
}
@InProceedings{Kong-weiWing:FCS:2002,
author = "Kong-wei Lye and Jeannette M. Wing",
title = "Game Strategies In Network Security",
crossref = "FCS:2002",
pages = "13--22",
abstract = "This paper presents a game-theoretic method for
analyzing the security of computer networks. We view
the interactions between an attacker and the
administrator as a two-player stochastic game and
construct a model for the game. Using a non-linear
program, we compute the Nash equilibrium or
best-response strategies for the players (attacker and
administrator). We then explain why the strategies are
realistic and how administrators can use these results
to enhance the security of their network.",
}
@InProceedings{Conchon:FCS:2002,
author = "Sylvain Conchon",
title = "Modular Information Flow Analysis for Process
Calculi",
crossref = "FCS:2002",
pages = "23--34",
abstract = "We present a framework to extend, in a modular way,
the type systems of process calculi with
information-flow annotations that ensure a
noninterference property based on bisimulation. Our
method of adding security annotations readily supports
modern typing features, such as polymorphism and type
reconstruction, together with a non-interference proof.
Furthermore, the new systems thus obtained can detect,
for instance, information flow caused by contentions on
distributed resources, which are not detected in a
satisfactory way by using testing equivalences.",
}
@InProceedings{AppelMichaelStumpVirga:FLoC:2002:*FCS+VERIFY,
author = "Andrew W. Appel and Neophytos G. Michael and Aaron
Stump and Roberto Virga",
title = "A Trustworthy Proof Checker",
crossref = "FCS:2002",
pages = "37--48",
abstract = "Proof-Carrying Code (PCC) and other applications in
computer security require machine-checkable proofs of
properties of machine-language programs. The main
advantage of the PCC approach is that the amount of
code that must be explicitly trusted is very small: it
consists of the logic in which predicates and proofs
are expressed, the safety predicate, and the proof
checker. We have built a minimal-TCB checker, and we
explain its design principles, and the representation
issues of the logic, safety predicate, and safety
proofs. We show that the trusted code in such a system
can indeed be very small. In our current system the TCB
is less than 2,700 lines of code (an order of magnitude
smaller even than other PCC systems) which adds to our
confidence of its correctness.",
}
@InProceedings{SteelBundyDenney:FLoC:2002:*FCS+VERIFY,
author = "Graham Steel and Alan Bundy and Ewen Denney",
title = "Finding Counterexamples to Inductive Conjectures and
Discovering Security Protocol Attacks",
crossref = "FCS:2002",
pages = "49--58",
abstract = "We present an implementation of a method for finding
counterexamples to universally quantified conjectures
in first-order logic. Our method uses the proof by
consistency strategy to guide a search for a
counterexample and a standard first-order theorem
prover to perform a concurrent check for inconsistency.
We explain briefly the theory behind the method,
describe our implementation, and evaluate results
achieved on a variety of incorrect conjectures from
various sources. Some work in progress is also
presented: we are applying the method to the
verification of cryptographic security protocols. In
this context, a counterexample to a security property
can indicate an attack on the protocol, and our method
extracts the trace of messages exchanged in order to
effect this attack. This application demonstrates the
advantages of the method, in that quite complex side
conditions decide whether a particular sequence of
messages is possible. Using a theorem prover provides a
natural way of dealing with this. Some early results
are presented and we discuss future work. Keywords:
Counterexamples, Security Protocols, Non-theorems,
Proof by Consistency.",
}
@InProceedings{ArmandoCompagna:FLoC:2002:*FCS+VERIFY,
author = "Alessandro Armando and Luca Compagna",
title = "Automatic {SAT}-Compilation of Security Problems",
crossref = "FCS:2002",
pages = "59--67",
abstract = "We provide a fully automatic translation from security
protocol specifications into propositional logic which
can be effectively used to find attacks to protocols.
Our approach results from the combination of a
reduction of security problems to planning problems and
well-known SAT-reduction techniques developed for
planning. We also propose and discuss a set of
transformations on security problems whose application
has a dramatic effect on the size of the propositional
encoding obtained with our SAT-compilation technique.
We describe a model-checker for security protocols
based on our ideas and show that attacks to a set of
well-known authentication protocols are found in few
seconds by state-of-the-art SAT solvers.",
}
@InProceedings{Gollmann:FLoC:2002:*FCS+VERIFY,
author = "Dieter Gollmann",
title = "Defining Security is Difficult and Error Prone",
type = "Invited Talk",
crossref = "FCS:2002",
pages = "71",
abstract = "It is often claimed that the design of security
protocols is difficult and error prone, with formal
verification suggested as the recommended remedy.
Verification requires a formal statement of the desired
security properties and, maybe surprisingly, many
protocols are broken simply by varying the assumptions
on goals and intended environment. To defend my claim
that defining security is difficult and error prone
(and the really interesting challenge in formal
verification) I will discuss some old and new examples
of security protocols and their formal analysis.",
}
@InProceedings{Meadows:FLoC:2002:*FCS+VERIFY,
author = "Catherine Meadows",
title = "Identifying Potential Type Confusion in Authenticated
Messages",
crossref = "FCS:2002",
pages = "75--84",
abstract = "A type confusion attack is one in which a principal
accepts data of one type as data of another. Although
it has been shown by Heather et al. that there are
simple formatting conventions that will guarantee that
protocols are free from simple type confusions in which
fields of one type are substituted for fields of
another, it is not clear how well they defend against
more complex attacks, or against attacks arising from
interaction with protocols that are formatted according
to different conventions. In this paper we show how
type confusion attacks can arise in realistic
situations even when the types are explicitly defined
in at least some of the messages, using examples from
our recent analysis of the Group Domain of
Interpretation Protocol. We then develop a formal model
of types that can capture potential ambiguity of type
notation, and outline a procedure for determining
whether or not the types of two messages can be
confused. We also discuss some open issues.",
}
@InProceedings{Cohen:FLoC:2002:*FCS+VERIFY,
author = "Ernie Cohen",
title = "Proving Protocols Safe from Guessing",
crossref = "FCS:2002",
pages = "85--92",
abstract = "We describe how to prove cryptographic protocols
secure against a Dolev-Yao attacker that can also
engage in idealized offline guessing attacks. Our
method is based on constructing a first-order invariant
that bounds, in every reachable state, both the
information available to an attacker and the steps
of guessing attacks starting from this information. We
have implemented the method as an extension to the
protocol verifier TAPS, making it the first mechanical
verifier to prove protocols secure against guessing
attacks in an unbounded model.",
}
@InProceedings{BauerLigattiWalker:FCS:2002,
author = "Lujo Bauer and Jarred Ligatti and David Walker",
title = "More Enforceable Security Policies",
crossref = "FCS:2002",
pages = "95--104",
abstract = "We analyze the space of security policies that can be
enforced by monitoring programs at runtime. Our program
``monitors'' are automata that examine the sequence of
program actions and transform the sequence when it
deviates from the specified policy. The simplest such
automaton truncates the action sequence by terminating
a program. Such automata are commonly known as
``security automata,'' and they enforce Schneider's EM
class of security policies. We define automata with
more powerful transformational abilities, including the
ability to insert a sequence of actions into the event
stream and to suppress actions in the event stream
without terminating the program. We give a
set-theoretic characterization of the policies these
new automata are able to enforce and show that they are
a superset of the EM policies.",
}
@InProceedings{LiuSmith:FCS:2002,
author = "Yu David Liu and Scott F. Smith",
title = "A Component Security Infrastructure",
crossref = "FCS:2002",
pages = "105--115",
abstract = "This paper defines a security infrastructure for
access control at the component level of programming
language design. Distributed components are an ideal
place to define and enforce significant security
policies, because components are large entities that
often define the political boundaries of computation.
Also, rather than building a security infrastructure
from scratch, we build on a standard one, the SDSI/SPKI
security architecture.",
}
@InProceedings{SkalkaSmith:FCS:2002,
author = "Christian Skalka and Scott F. Smith",
title = "Static Use-Based Object Confinement",
crossref = "FCS:2002",
pages = "117--126",
abstract = "The confinement of object references is a significant
security concern for modern programming languages. We
define a language that serves as a uniform model for a
variety of confined object reference systems. A
\emph{use-based} approach to confinement is adopted,
which we argue is more expressive than previous
communication-based approaches. We then develop a
readable, expressive type system for static analysis of
the language, along with a type soundness result
demonstrating that run-time checks can be eliminated.
The language and type system thus serve as a reliable,
declarative and efficient foundation for secure
capability-based programming and object confinement.",
}
@InProceedings{Panel:FLoC:2002:*FCS+VERIFY,
author = "Ernie Cohen and Alan Jeffrey and Fabio Martinelli and
Fabio Massacci and Catherine Meadows and David Basin",
title = "The Future of Protocol Verification",
type = "Panel",
crossref = "FCS:2002",
pages = "129",
abstract = "This panel is aimed at assessing the state of the art
and exploring trends and emerging issues in computer
security in general and protocol verification in
particular. It brings together experts from both the
security community and the verification area. Some of
the questions over which they will be invited to discuss
their views, and maybe even to answer, include: What is
already solved? What still needs improvement? What are
the challenging open problems? What is the role of
automated theorem proving in protocol verification?
What else is there in computer security besides
protocol verification? A format for this panel has been
chosen as to achieve an interesting, vibrant, and
productive discussion.",
}
@Proceedings{FCS:2002,
editor = "Iliano Cervesato",
title = "Foundations of Computer Security",
booktitle = "Foundations of Computer Security",
key = "FCS",
pages = "v+130",
year = "2002",
month = jul # " 25--26",
venue = "Copenhagen, Denmark",
series = "DIKU technical reports",
volume = "02-12",
institution = "University of Copenhagen, Dept.~of Computer Science",
ISSN = "0107-8283",
}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% FICS - Fixed Points in Computer Science
% ================================
@InProceedings{Winskel:FICS:2002,
author = "Glynn Winskel",
title = "Calculus for categories",
type = "Invited talk",
crossref = "FICS:2002",
pages = "1",
}
@InProceedings{Adamek:FICS:2002,
author = "Ji{\v r}{\'\i} Ad{\'a}mek and Stefan Milius and Ji{\v
r}{\'\i} Velebil",
title = "Parametric corecursion and completely iterative
monads",
crossref = "FICS:2002",
pages = "2--5",
}
@InProceedings{Ghani:FICS:2002,
author = "Neil Ghani and Christoph L{\"u}th and Federico De
Marchi",
title = "Coalgebraic approaches to algebraic terms",
crossref = "FICS:2002",
pages = "6--8",
}
@InProceedings{Uustalu:FICS:2002,
author = "Tarmo Uustalu",
title = "Generalizing substitution",
crossref = "FICS:2002",
pages = "9--11",
}
@InProceedings{Benton:FICS:2002,
author = "Nick Benton and Martin Hyland",
title = "Traced pre-monoidal categories",
crossref = "FICS:2002",
pages = "12--19",
}
@InProceedings{Aceto:FICS:2002,
author = "Luca Aceto",
title = "Kleene through the process algebraic glass",
type = "Invited talk",
crossref = "FICS:2002",
pages = "20--21",
}
@InProceedings{Sprenger:FICS:2002,
author = "Christoph Sprenger and Mads Dam",
title = "A note on global induction in a mu-calculus with
explicit approximations",
crossref = "FICS:2002",
pages = "22--24",
}
@InProceedings{Shilov:FICS:2002,
author = "Nikolay Vyacheslavovich Shilov and Natalia Olegovna
Garanina",
title = "Model checking knowledge and fixpoints",
crossref = "FICS:2002",
pages = "25--39",
}
@InProceedings{Devereux:FICS:2002,
author = "Benet Devereux",
title = "Strong next-time operators for multiple-valued
mu-calculus",
crossref = "FICS:2002",
pages = "40--43",
}
@InProceedings{Kozen:FICS:2002,
author = "Dexter Kozen",
title = "On two letters versus three",
type = "Invited talk",
crossref = "FICS:2002",
pages = "44--50",
}
@InProceedings{Leiss:FICS:2002,
author = "Hans Leiss",
title = "Kleenean semimodules and linear languages",
crossref = "FICS:2002",
pages = "51--53",
}
@InProceedings{Zhang:FICS:2002,
author = "Guo-Qiang Zhang",
title = "Decidable fragments of domain mu-calculus: an
automata-theoretic perspective",
crossref = "FICS:2002",
pages = "54--57",
}
@InProceedings{Korovina:FICS:2002,
author = "Margarita V. Korovina",
title = "Fixed points on abstract structures without the
equality test",
crossref = "FICS:2002",
pages = "58--60",
}
@InProceedings{Boudol:FICS:2002,
author = "G{\'e}rard Boudol and Pascal Zimmer",
title = "Recursion in the call-by-value $\lambda$-calculus",
crossref = "FICS:2002",
pages = "61--66",
}
@InProceedings{Labella:FICS:2002,
author = "Anna Labella",
title = "Kleene's (unary) star in nondeterministic context",
type = "Invited talk",
crossref = "FICS:2002",
pages = "67--68",
}
@InProceedings{Jensen:FICS:2002,
author = "Thomas Jensen and Florimond Ployette and Olivier
Ridoux",
title = "Iteration schemes for fixed point calculation",
crossref = "FICS:2002",
pages = "69--76",
}
@InProceedings{Santocanale:FICS:2002,
author = "Luigi Santocanale",
title = "Congruences of modal $\mu$-algebras",
crossref = "FICS:2002",
pages = "77--81",
}
@Proceedings{FICS:2002,
editor = "Zolt{\'a}n {\'E}sik and Anna Ing{\'o}lfsd{\'o}ttir",
title = "Fixed Points in Computer Science",
booktitle = "Fixed Points in Computer Science",
conference = "Preliminary Proceedings",
key = "FICS",
pages = "iv+81",
year = "2002",
month = jul # " 20--21",
venue = "Copenhagen, Denmark",
series = "BRICS Notes Series",
volume = "NS-02-2",
institution = "University of Aarhus",
}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% FME - Formal Methods -- Getting {IT} Right
% ====================================
@InProceedings{Shankar:FLoC:2002:*FME+LICS+RTA,
author = "Natarajan Shankar",
title = "Little Engines of Proof",
type = "Invited talk",
crossref = "FME:2002",
pages = "1--20",
abstract = "The automated construction of mathematical proof is a
basic activity in computing. Since the dawn of the
field of automated reasoning, there have been two
divergent schools of thought. One school, best
represented by Alan Robinson's resolution method, is
based on simple uniform proof search procedures guided
by heuristics. The other school, pioneered by Hao Wang,
argues for problem-specific combinations of decision
and semi-decision procedures. While the former school
has been dominant in the past, the latter approach has
greater promise. In recent years, several high quality
inference engines have been developed, including
propositional satisfiability solvers, ground decision
procedures for equality and arithmetic, quantifier
elimination procedures for integers and reals, and
abstraction methods for finitely approximating problems
over infinite domains. We describe some of these
``little engines of proof'' and a few of the ways in
which they can be combined. We focus in particular on
the combination of ground decision procedures and their
use in automated verification. We conclude by arguing
for a modern reinterpretation and reappraisal of Hao
Wang's hitherto neglected ideas on inferential
analysis.",
}
@InProceedings{Legeard41:FME:2002,
author = "Bruno Legeard and Fabien Peureux and Mark Utting",
title = "Automated Boundary Testing from {Z} and {B}",
crossref = "FME:2002",
pages = "21--40",
abstract = "This article presents a method for black-box boundary
testing from B and Z formal specifications. The basis
of the method is to test every operation of the system
at every boundary state using all input boundary values
of that operation. The test generation process is
highly automated. It starts by calculating boundary
goals from Pre/Post predicates derived from the formal
model. Then each boundary goal is instantiated to a
reachable boundary state, by searching for a sequence
of operations that reaches the boundary goal from the
initial state. This process makes intensive use of a
set-oriented constraint technology, both for boundary
computation and to traverse the state space. The method
was designed on the basis of industrial applications in
the domain of critical software (Smart card and
transportation). Application results show the
effectiveness and the scalability of the method. In
this paper, we give an overview of the method and focus
on the calculation of the boundary goals and states.",
}
@InProceedings{Ratsaby74:FME:2002,
author = "Gil Ratsaby and Baruch Sterin and Shmuel Ur",
title = "Improvements in Coverability Analysis",
crossref = "FME:2002",
pages = "41--56",
abstract = "In simulation-based verification users are faced with
the challenge of maximizing test coverage while
minimizing testing costs. Sophisticated techniques are
used to generate clever test cases and to determine the
quality attained by the tests. The latter activity,
which is essential for locating areas of the design
that need to have more tests, is called {\em test
coverage analysis}. We have previously introduced the
notion of coverability, which refers to the degree to
which a model can be covered when subjected to testing.
We showed how a coverability analyzer enables naive
users to take advantage of the power of symbolic model
checking with a 'one-button' interface for coverability
analysis. In this work, we present several heuristics,
based on static program analysis and on simulation of
counter examples, for improving the efficiency of
coverability analysis by symbolic model checking. We
explain each heuristic independently and suggest a way
to combine them. We present an experiment that shows
improvements based on using random simulation in the
analysis of coverability.",
}
@InProceedings{Rial33:FME:2002,
author = "Juan C. Burguillo-Rial and Manuel J.
Fern{\'a}ndez-Iglesias and Francisco J.
Gonz{\'a}lez-Casta{\~n}o and Mart{\'\i}n
Llamas-Nistal",
title = "Heuristic-driven Test Case Selection from Formal
Specifications. {A} Case Study",
crossref = "FME:2002",
pages = "57--76",
abstract = "We propose an approach to testing that combines formal
methods with practical criteria, close to the testing
engineer's experience. It can be seen as a framework to
evaluate and select test suites using formal methods,
assisted by informal heuristics. This proposal is
illustrated with a practical case study: the testing of
a protocol for mobile auctions in a distributed,
wireless environment.",
}
@InProceedings{Bourdonov04:FME:2002,
author = "Igor B. Bourdonov and Alexander S. Kossatchev and
Victor V. Kuliamin and Alexander K. Petrenko",
title = "{UniTesK} Test Suite Architecture",
crossref = "FME:2002",
pages = "77--88",
abstract = "The article presents the main components of the test
suite architecture underlying UniTesK test development
technology, an automated specification based test
development technology for use in industrial testing of
general-purpose software. The architecture presented
contains such elements as automatically generated
oracles, components to monitor formally defined test
coverage criteria, and test scenario specifications for
test sequence generation with the help of an automata
based testing mechanism. This work stems from the ISP
RAS results of academic research and 7-years experience
in industrial application of formal testing
techniques.",
}
@InProceedings{Oheimb36:FME:2002,
author = "David von Oheimb and Tobias Nipkow",
title = "Hoare Logic for {NanoJava}: Auxiliary Variables, Side
Effects and Virtual Methods revisited",
crossref = "FME:2002",
pages = "89--105",
abstract = "We define NanoJava, a kernel of Java tailored to the
investigation of Hoare logics. We then introduce a
Hoare logic for this language featuring an elegant new
approach for expressing auxiliary variables: by
universal quantification on the outer logical level.
Furthermore, we give simple means of handling
side-effecting expressions and dynamic binding within
method calls. The logic is proved sound and
(relatively) complete using Isabelle/HOL.",
}
@InProceedings{Bicarregui78:FME:2002,
author = "Juan Bicarregui",
title = "Do not read this",
crossref = "FME:2002",
pages = "106--125",
abstract = "We discuss the interpretation of read and write frames
in model-oriented specification taking the B's
generalised substitutions as the vehicle for the
presentation. In particular, we focus on the
interpretation of read frames, the semantics of which
have not been considered by previous authors. We give
several examples of the relevance of read frames and
show that a substitution admits a read respecting
implementation if and only if a certain bisimulation
condition is satisfied. We use this to motivate a
richer semantic model for substitutions which
interprets read and write constraints directly in the
denotation of a substitution. This semantics yields
some non-interference results between substitutions
which cannot be given at this level without the use of
read and write frames.",
}
@InProceedings{Jorgensen55:FME:2002,
author = "Niels J{\o}rgensen",
title = "Safeness of {M}ake-based Incremental Recompilation",
crossref = "FME:2002",
pages = "126--145",
abstract = "The {\tt make} program is widely used in large
software projects to reduce compilation time. {\tt
make} skips source files that would have compiled to
the same result as in the previous build. (Or so it is
hoped.) The crucial issue of safeness of omitting a
brute-force build is addressed by defining a semantic
model for {\tt make}. Safeness is shown to hold if a
set of criteria are satisfied, including soundness,
fairness, and completeness of makefile rules.
Conditions are established under which a makefile can
safely be modified by deleting, adding, or rewriting
rules.",
}
@InProceedings{David87:FME:2002,
author = "Sharon Barner and Shoham Ben-David and Anna Gringauze
and Baruch Sterin and Yaron Wolfsthal",
title = "An Algorithmic Approach to Design Exploration",
crossref = "FME:2002",
pages = "146--162",
abstract = "In recent years, the technique of symbolic model
checking has proven itself to be extremely useful in
the verification of hardware. However, after almost a
decade, the use of model checking techniques is still
considered complicated, and is mostly practiced by
experts. In this paper we address the question of how
model checking techniques can be made more accessible
to the hardware designer community. We introduce the
concept of {\em exploration} through model checking,
and demonstrate how, when differently tuned, the known
techniques can be used to easily obtain interesting
traces out of the model, rather than used for the
discovery of hard-to-find bugs. We present a set of
algorithms, which support the exploration flavor of
model checking.",
}
@InProceedings{Mota52:FME:2002,
author = "Alexandre Mota and Paulo Borba and Augusto Sampaio",
title = "Mechanical Abstraction of {CSP$_Z$} Processes",
crossref = "FME:2002",
pages = "163--183",
abstract = "We propose a mechanised strategy to turn an infinite
CSP$_{Z}$ process (formed of CSP and Z constructs) into
one suitable for model checking. This strategy
integrates two theories which allow us to consider the
infiniteness of CSP$_{Z}$ as two separate problems:
data independence for handling the behavioural aspect
and abstract interpretation for handling the data
structure aspect. A distinguishing feature of our
approach to abstract interpretation is the generation
of the abstract domains based on a symbolic execution
of the process.",
}
@InProceedings{Arts94:FME:2002,
author = "Thomas Arts and Clara Benac Earle and John Derrick",
title = "Verifying {E}rlang code: a resource locker
case-study",
crossref = "FME:2002",
pages = "184--203",
abstract = "In this paper we describe an industrial case-study on
the development of formally verified code for
Ericsson's AXD 301 switch. For the formal verification
of Erlang software we have developed a tool to apply
model checking to communicating Erlang processes. We
make effective use of Erlang's design principles for
large software systems to obtain relatively small
models of specific Erlang programs. By assuming a
correct implementation of the software components and
embedding their semantics into our model, we can
concentrate on the specific functionality of the
components. We constructed a tool to automatically
translate the Erlang code to a process algebra with
data. Existing tools were used to generate the full
state space and to formally verify properties stated in
the modal $\mu$-calculus. As long as the specific
functionality of the component has a finite state
vector, we can generate a finite state space, even if
the state space of the real Erlang system might be
infinite. In this paper we illustrate this by
presenting a case-study based on a piece of software in
Ericsson's AXD 301 switch, which implements a
distributed resource locker algorithm. Some of the key
properties we proved are mutual exclusion and
non-starvation for the program.",
}
@InProceedings{Huber95:FME:2002,
author = "Michael Huber and Steve King",
title = "Towards an Integrated Model Checker for Railway
Signalling Data",
crossref = "FME:2002",
pages = "204--223",
abstract = "Geographic Data for Solid State Interlocking (SSI)
systems detail site-specific behaviour of the railway
interlocking. This report demonstrates how five vital
safety properties of such data can be verified
automatically using model checking. A prototype of a
model checker for Geographic Data has been implemented
by replacing the parser and compiler of NuSMV. The
resulting tool, gdlSMV, directly reads Geographic Data
and builds a corresponding representation on which
model checking is performed using NuSMV's symbolic
model checking algorithms. Because of the large number
of elements in a typical track layout controlled by an
SSI system, a number of optimisations had to be
implemented in order to be able to verify the
corresponding data sets. We outline how most of the
model checking can be hidden from the user, providing a
simple interface that directly refers to the data being
verified.",
}
@InProceedings{Hall:FME:2002,
author = "Anthony Hall",
title = "Correctness by Construction: Integrating Formality
into a Commercial Development Process",
type = "Invited talk",
crossref = "FME:2002",
pages = "224--233",
abstract = "This paper describes a successful project where we
used formal methods as an integral part of the
development process for a system intended to meet ITSEC
E6 requirements. The system runs on commercially
available hardware and uses common COTS software. We
found that using formal methods in this way gave
benefits in accuracy and testability of the software,
reduced the number of errors in the delivered product
and was a cost-effective way of developing high
integrity software. Our experience contradicts the
belief that formal methods are impractical, or that
they should be treated as an overhead activity, outside
the main stream of development. The paper explains how
formal methods were used and what their benefits were.
It shows how formality was integrated into the process.
It discusses the use of different formal techniques
appropriate for different aspects of the design and the
integration of formal with non-formal methods.",
}
@InProceedings{Marinov68:FME:2002,
author = "Darko Marinov and Sarfraz Khurshid",
title = "{VAlloy} -- Virtual Functions Meet a Relational
Language",
crossref = "FME:2002",
pages = "234--251",
abstract = "We propose VAlloy, a veneer onto the first order,
relational language Alloy. Alloy is suitable for
modeling structural properties of object-oriented
software. However, Alloy lacks support for dynamic
dispatch, i.e., function invocation based on actual
parameter types. VAlloy introduces virtual functions in
Alloy, which enables intuitive modeling of inheritance.
Models in VAlloy are automatically translated into
Alloy and can be automatically checked using the
existing Alloy Analyzer. We illustrate the use of
VAlloy by modeling object equality, such as in Java. We
also give specifications for a part of the Java
Collections Framework.",
}
@InProceedings{Rusu17:FME:2002,
author = "Vlad Rusu",
title = "Verification using Test Generation Techniques",
crossref = "FME:2002",
pages = "252--271",
abstract = "Applying formal methods to testing has recently become
a popular research topic. In this paper we explore the
opposite approach, namely, applying testing techniques
to formal verification. The idea is to use symbolic
test generation to extract subgraphs (called {\sl
components}) from a specification and to perform the
verification on the components rather than on the whole
system. This may considerably reduce the verification
effort and, under reasonable sufficient conditions, a
safety property verified on a component also holds on
the whole specification. We demonstrate the approach by
verifying an electronic purse system using our symbolic
test generation tool STG and the PVS theorem prover.",
}
@InProceedings{Catano37:FME:2002,
author = "N{\'e}stor Cata{\~n}o and Marieke Huisman",
title = "Formal specification and static checking of {G}emplus'
electronic purse using {ESC/Java}",
crossref = "FME:2002",
pages = "272--289",
abstract = "This paper presents a case study in formal
specification of smart card programs, using ESC/Java.
It discusses an electronic purse application, provided
by Gemplus, that we have annotated with functional
specifications (i.e., pre- and post-conditions,
modifies clauses and class invariants) that are as
detailed as possible. The specification is based on the
informal documentation of the application. Using
ESC/Java, the implementation has been checked with
respect to the specification. This revealed several
errors or possibilities for improvement in the source
code (such as removing unnecessary tests). Our paper
shows that a relatively lightweight use of formal
specification techniques can already have a serious
impact on the quality of a program and its
documentation. Furthermore, we also present some ideas
on how ESC/Java could be further improved, both with
respect to specification and verification.",
}
@InProceedings{Casset13:FME:2002,
author = "Ludovic Casset",
title = "Development of an Embedded Verifier for {J}ava {C}ard
Byte Code using Formal Methods",
crossref = "FME:2002",
pages = "290--309",
abstract = "The Java security policy is implemented using security
components such as a Java Virtual Machine (JVM), API,
verifier, and a loader. It is of prime importance to
ensure that these components are implemented in
accordance with their specifications. Formal methods
can be used to bring the mathematical proof that their
implementation corresponds to their specification. In
this paper, we introduce the formal development of a
complete byte code verifier for Java Card and its
on-card integration. In particular, we aim to focus on
the model and the proof of the complete type verifier
for the Java Card language. The global architecture of
the verification process implemented in a real industrial
case study is described and the detailed specification
of the type verifier is discussed as well as its proof.
Moreover, this paper presents a comparison between
formal and traditional development, summing up the pros
and cons of using formal methods in industry.",
}
@InProceedings{Backes35:FME:2002,
author = "Michael Backes and Christian Jacobi and Birgit
Pfitzmann",
title = "Deriving Cryptographically Sound Implementations Using
Composition and Formally Verified Bisimulation",
crossref = "FME:2002",
pages = "310--329",
abstract = "We consider abstract specifications of cryptographic
protocols which are both suitable for formal
verification and maintain a sound cryptographic
semantics. In this paper, we present the first abstract
specification for ordered secure message transmission
in reactive systems based on the recently published
model of Pfitzmann and Waidner. We use their
composition theorem to derive a possible implementation
whose correctness additionally involves a classical
bisimulation, which we formally verify using the
theorem prover PVS. The example serves as the first
important case study which shows that this approach is
applicable in practice, and it is the first example
that combines tool-supported formal proof techniques
with the rigorous proofs of cryptography.",
}
@InProceedings{Pahl38:FME:2002,
author = "Claus Pahl",
title = "Interference Analysis for Dependable Systems using
Refinement and Abstraction",
crossref = "FME:2002",
pages = "330--349",
abstract = "A common requirement for modern distributed and
reactive systems is a high dependability guaranteeing
reliability and security. The rigorous analysis of
dependable systems specifications is of paramount
importance for the reliability and security of these
systems. A two-layered modal specification notation
will allow the specification of services and protocols
for distributed dependable systems and their
properties. Refinement and its dual -- abstraction --
will play the key roles in an integrated development
and analysis framework. Refinement and abstraction form
the basis for an interference analysis method for
security properties and for automated test case
generation.",
}
@InProceedings{Henderson71:FME:2002,
author = "Neil Henderson and Stephen Paynter",
title = "The Formal Classification and Verification of
{S}impson's 4-slot Asynchronous Communication
Mechanism",
crossref = "FME:2002",
pages = "350--369",
abstract = "This paper critiques and extends Lamport's taxonomy of
asynchronous registers. This extended taxonomy is used
to characterise Simpson's 4-slot asynchronous
communication mechanism (ACM). A formalisation of the
Lamport atomic property and Simpson's original 4-slot
implementation is given in the PVS logic. We prove that
the 4-slot is atomic using Nipkow's retrieve relation
proof rules. A description is given of the formal
proofs, which have been discharged in the PVS theorem
prover.",
}
@InProceedings{Fidge73:FME:2002,
author = "C. J. Fidge",
title = "Timing Analysis of Assembler Code Control-Flow Paths",
crossref = "FME:2002",
pages = "370--389",
abstract = "Timing analysis of assembler code is essential to
achieve the strongest possible guarantee of correctness
for safety-critical, real-time software. Previous work
has shown how timing constraints on control-flow paths
through high-level language programs can be formalised
using the semantics of the statements comprising the
path. We extend these results to assembler-level code
where it becomes possible to not only determine timing
constraints, but also to verify them against the known
execution times for each instruction. A minimal formal
model is developed with both a weakest liberal
precondition and a strongest postcondition semantics.
However, despite the formalism's simplicity, it is
shown that complex timing behaviour associated with
instruction pipelining and iterative code can be
modelled accurately.",
}
@InProceedings{Cengarle54:FME:2002,
author = "Mar{\'\i}a Victoria Cengarle and Alexander Knapp",
title = "Towards {OCL/RT}",
crossref = "FME:2002",
pages = "390--409",
abstract = "An extension of the ``Object Constraint Language''
(OCL) for modeling real-time and reactive systems in
the ``Unified Modeling Language'' (UML) is proposed,
called OCL/RT. A general notion of events that may
carry time stamps is introduced providing means to
describe the detailed dynamic and timing behaviour of
UML software models. OCL is enriched by satisfaction
operators \texttt{@$\eta$} for referring to the value
in the history of an expression at the instant when an
event $\eta$ occurred, as well as the modalities
\texttt{always} and \texttt{sometime}. The approach is
illustrated by several examples. Finally, an
operational semantics of OCL/RT is given.",
}
@InProceedings{Garavel88:FME:2002,
author = "Hubert Garavel and Holger Hermanns",
title = "On Combining Functional Verification and Performance
Evaluation using {CADP}",
crossref = "FME:2002",
pages = "410--429",
abstract = "Considering functional correctness and performance
evaluation in a common framework is desirable, both for
scientific and economic reasons. In this paper, we
describe how the CADP toolbox, originally designed for
verifying the functional correctness of LOTOS
specifications, can also be used for performance
evaluation. We illustrate the proposed approach by the
performance study of the SCSI-2 bus arbitration
protocol.",
}
@InProceedings{Basin:FME:2002,
author = "David Basin",
title = "The Next 700 Synthesis Calculi",
type = "Invited talk",
crossref = "FME:2002",
pages = "430",
abstract = "Over the last decade I have worked with colleagues on
different projects to develop, implement, and automate
the use of calculi for program synthesis and
transformation. These projects had different
motivations and goals and differed too in the kinds of
programs synthesized (e.g., functional programs, logic
programs, and even circuit descriptions). However,
despite their differences they were all based on three
simple ideas. First, calculi can be formally derived in
a rich enough logic (e.g., higher-order logic). Second,
higher-order resolution is the central mechanism used
to synthesize programs during proofs of their
correctness. And third, synthesis proofs have a
predictable form and can be partially or completely
automated. In this talk I explain these ideas and
illustrate the general methodology employed.",
}
@InProceedings{Whalen21:FME:2002,
author = "Michael Whalen and Johann Schumann and Bernd Fischer",
title = "Synthesizing Certified Code",
crossref = "FME:2002",
pages = "431--450",
abstract = "Code certification is a lightweight approach for
formally demonstrating software quality. Its basic idea
is to require code producers to provide formal
\emph{proofs} that their code satisfies certain quality
properties. These proofs serve as \emph{certificates}
that can be checked independently. Since code
certification uses the same underlying technology as
program verification, it requires detailed annotations
(e.g., loop invariants) to make the proofs possible.
However, manually adding annotations to the code is
time-consuming and error-prone. We address this problem
by combining code certification with automatic program
synthesis. Given a high-level specification, our
approach simultaneously generates code and \emph{all}
annotations required to certify the generated code. We
describe a certification extension of {\sc AutoBayes},
a synthesis tool for automatically generating data
analysis programs. Based on built-in domain knowledge,
proof annotations are added and used to generate proof
obligations that are discharged by the automated
theorem prover E-SETHEO. We demonstrate our approach by
certifying operator- and memory-safety on a
data-classification program. For this program, our
approach was faster and more precise than PolySpace, a
commercial static analysis tool.",
}
@InProceedings{Sampaio66:FME:2002,
author = "Augusto Sampaio and Jim Woodcock and Ana Cavalcanti",
title = "Refinement in {C}ircus",
crossref = "FME:2002",
pages = "451--470",
abstract = "We describe refinement in Circus, a concurrent
specification language that integrates imperative CSP,
Z, and the refinement calculus. Each Circus process has
a state and accompanying actions that define both the
internal state transitions and the changes in control
flow that occur during execution. We define the meaning
of refinement of processes and their actions, and
propose a sound data refinement technique for process
refinement. Refinement laws for CSP and Z are directly
relevant and applicable to Circus, but our focus here
is on new laws for processes that integrate state and
control. We give some new results about the
distribution of data refinement through the combinators
of CSP. We illustrate our ideas with the development of
a distributed system of co-operating processes from a
centralised specification.",
}
@InProceedings{Cavalcanti63:FME:2002,
author = "Ana Cavalcanti and David A. Naumann",
title = "Forward simulation for data refinement of classes",
crossref = "FME:2002",
pages = "471--490",
abstract = "Simulation is the most widely used technique to prove
data refinement. We define forward simulation for a
language with recursive classes, inheritance, type
casts and tests, dynamic binding, class based
visibility, mutable state (without aliasing), and
specification constructs from refinement calculi. It is
a language based on sequential Java, but it also
includes specification and design mechanisms
appropriate for the construction of programs based on
refinement. We show simulation to be sound for data
refinement of classes in this language.",
}
@InProceedings{Wildman26:FME:2002,
author = "Luke Wildman",
title = "A Formal Basis for a Program Compilation Proof Tool",
crossref = "FME:2002",
pages = "491--510",
abstract = "This paper presents a case study in verified program
compilation from high-level language programs to
assembler code using the Cogito formal development
system. A form of window-inference based on the Z
schema is used to perform the compilation.
Data-refinement is used to change the representation of
integer variables to assembler word locations.",
}
@InProceedings{Firley49:FME:2002,
author = "Thomas Firley and Ursula Goltz",
title = "Property Dependent Abstraction of Control Structure
for Software Verification",
crossref = "FME:2002",
pages = "511--530",
abstract = "In this paper we present a technique to compute
abstract models for formal system verification. The
method reduces the state space by eliminating those
parts of a system model which are not required to check
a property. The abstract model depends on the property,
which is a formula of the next-less fragment of
$\textit{CTL}^*$. The algorithm reads a system
description, annotates it with abstract sub-models for
the statements, which are finally composed as abstract
model for the system. In the paper we introduce the
core algorithm and illustrate it by an example.",
}
@InProceedings{Ioustinova72:FME:2002,
author = "Natalia Ioustinova and Natalia Sidorova and Martin
Steffen",
title = "Closing open {SDL}-systems for model checking with
{DTSpin}",
crossref = "FME:2002",
pages = "531--548",
abstract = "Model checkers like Spin can handle closed reactive
systems, only. Thus to handle open systems, in
particular when using assume-guarantee reasoning, we
need to be able to close (sub-)systems, which is
commonly done by adding an environment process. For
models with asynchronous message-passing communication,
however, modelling the environment as separate process
will lead to a combinatorial explosion caused by all
combinations of messages in the input queues. In this
paper we describe the implementation of a tool which
automatically closes DTPromela translations of
SDL-specifications by embedding the timed chaotic
environment into the system. To corroborate the
usefulness of our approach, we compare the state space
of models closed by embedding chaos with the state
space of the same models closed with chaos as external
environment process on some simple models and on a case
study from a wireless ATM medium-access protocol.",
}
@InProceedings{Kristensen20:FME:2002,
author = "Lars Michael Kristensen and Thomas Mailund",
title = "A Generalised Sweep-Line Method for Safety
Properties",
crossref = "FME:2002",
pages = "549--567",
abstract = "The recently developed sweep-line method exploits
progress present in many concurrent systems to explore
the full state space of the system while storing only
small fragments of the state space in memory at a time.
A disadvantage of the sweep-line method is that it
relies on a strict and global notion of progress. This
prevents the method from being used for many reactive
systems. In this paper we generalise the sweep-line
method such that it can be used for verifying safety
properties of reactive systems exhibiting local
progress. The basic idea is to relax the strict notion
of progress and to recognise the situations where this
could cause the state space exploration not to
terminate. The generalised sweep-line method explores
all reachable states of the system, but may explore a
state several times. We demonstrate the practical
application of the generalised sweep-line method on two
case studies demonstrating a reduction in peak memory
usage to typically 10 \% compared to the use of
ordinary full state spaces.",
}
@InProceedings{Treharne77:FME:2002,
author = "Helen Treharne",
title = "Supplementing a {UML} development process with {B}",
crossref = "FME:2002",
pages = "568--586",
abstract = "This paper discusses our experiences of using UML and
B together through an illustrative case study. Our
approach to using UML and B centers around stereotyping
UML classes in order to identify which classes should
be modelled in B. We discuss the tensions between the
notations, and the compromises that need to be reached
in order for B to supplement a UML development. The
case study begins from the initial conception of a
library system and its use case view in order to
demonstrate how the classes were identified.",
}
@InProceedings{Dong62:FME:2002,
author = "Jin Song Dong and Jing Sun and Hai Wang",
title = "Semantic Web for Extending and Linking Formalisms",
crossref = "FME:2002",
pages = "587--606",
abstract = "The diversity of various formal specification
techniques and the need for their effective
combinations requires an extensible and integrated
supporting environment. The Web provides infrastructure
for such an environment for formal specification and
design because it allows sharing of various design
models and provides hyper textual links among the
models. Recently the Semantic Web Activity proposed the
idea of having data on the web defined and linked in a
way that it can be used for automation, extension and
integration. The success of the Semantic Web may have
profound impact on the web environment for formal
specifications, especially for extending and
integrating different formalisms. This paper
demonstrates how RDF and DAML can be used to build a
Semantic Web environment for supporting, extending and
integrating various formal specification languages.
Furthermore, the paper illustrates how RDF query
techniques can facilitate specification
comprehension.",
}
@InProceedings{Umedu40:FME:2002,
author = "Takaaki Umedu and Yoshiki Terashima and Keiichi
Yasumoto and Akio Nakata and Teruo Higashino and
Kenichi Taniguchi",
title = "A Language for Describing Wireless Mobile Applications
with Dynamic Establishment of Multi-way Synchronization
Channels",
crossref = "FME:2002",
pages = "607--624",
abstract = "In this paper, we define a new language called LOTOS/M
which enables dynamic establishment of multi-way
synchronization channels among multiple agents
(processes running on mobile hosts) on ad hoc networks,
and show how it can be applied to designing wireless
mobile applications. In LOTOS/M, a system specification
is given by a set of independent agents. When a pair of
agents is in a state capable of communicating with each
other, a synchronization relation on a given gate
(channel) list can dynamically be assigned to them by a
new facility of LOTOS/M: (i) advertisement for a
synchronization peer on a gate list and (ii)
participation in the advertised synchronization. The
synchronization relation on the same gate list can also
be assigned to multiple agents to establish a multi-way
synchronization channel incrementally so that the
agents can exchange data through the channel. When an
agent goes in a state incapable of communication, a
synchronization relation assigned to the agent is
canceled and it can run independently of the others. By
describing some examples, we have confirmed that
typical wireless mobile systems can easily be specified
in LOTOS/M, and that they can be implemented
efficiently with our LOTOS/M to Java compiler.",
}
@Proceedings{FME:2002,
editor = "Lars-Henrik Eriksson and Peter Alexander Lindsay",
title = "Formal Methods -- Getting {IT} Right",
booktitle = "Formal Methods -- Getting {IT} Right",
conference = "International Symposium of Formal Methods Europe",
key = "FME",
year = "2002",
month = jul # " 22--24",
venue = "Copenhagen, Denmark",
series = "Lecture Notes in Computer Science",
volume = "2391",
publisher = "Springer-Verlag",
ISBN = "3-540-43928-5",
ISSN = "0302-9743",
}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% HOR - Higher-order rewriting
% ======================
@InProceedings{CirsteaKirchnerLiquoriWack:HOR:2002,
author = "Horatiu Cirstea and Claude Kirchner and Luigi Liquori
and Benjamin Wack",
title = "The rho cube: some results, some problems",
crossref = "HOR:2002",
pages = "1--2",
}
@InProceedings{Forest:HOR:2002,
author = "Julien Forest",
title = "Evaluation strategies for calculi with explicit
pattern matching and substitutions",
crossref = "HOR:2002",
pages = "3--5",
}
@InProceedings{Hamana:HOR:2002,
author = "Makoto Hamana",
title = "Term rewriting with variable binding",
crossref = "HOR:2002",
pages = "6--7",
}
@InProceedings{Leivant:HOR:2002,
author = "Daniel Leivant",
title = "Untyped term rewriting",
crossref = "HOR:2002",
pages = "8--13",
}
@InProceedings{Mellies:HOR:2002,
author = "Paul-Andr{\'e} Melli{\`e}s",
title = "On head rewriting paths in the $\lambda
\sigma$-calculus",
crossref = "HOR:2002",
pages = "14--21",
}
@InProceedings{Momigliano:HOR:2002,
author = "Alberto Momigliano",
title = "Higher-order pattern disunification revisited",
crossref = "HOR:2002",
pages = "22--24",
}
@InProceedings{SakaiKusakari:HOR:2002,
author = "Masahiko Sakai and Keiichirou Kusakari",
title = "On proving termination of higher-order rewrite systems
by the dependency pair technique",
crossref = "HOR:2002",
pages = "25",
}
@Proceedings{HOR:2002,
editor = "Delia Kesner and Tobias Nipkow and Femke van
Raamsdonk",
title = "Higher-order rewriting",
booktitle = "Higher-order rewriting",
conference = "First international workshop",
key = "HOR",
year = "2002",
month = jul # " 21",
venue = "Copenhagen, Denmark",
series = "DIKU technical reports",
volume = "02-13",
institution = "University of Copenhagen, Dept.~of Computer Science",
ISSN = "0107-8283",
}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% HYLO - Hybrid Logics
% =============
@InProceedings{Paper3:HYLO:2002,
author = "Carlos Eduardo Areces and Carsten Lutz",
title = "Concrete Domains and Nominals United",
crossref = "HYLO:2002",
pages = "9--20",
}
@InProceedings{Paper8:HYLO:2002,
author = "Patrick Blackburn and Balder ten Cate",
title = "Beyond Pure Axioms: Node Creating Rules in Hybrid
Tableaux",
crossref = "HYLO:2002",
pages = "21--35",
}
@InProceedings{Paper4:HYLO:2002,
author = "Torben Bra{\"u}ner",
title = "Natural Deduction for First-Order Hybrid Logic",
crossref = "HYLO:2002",
pages = "37--51",
}
@InProceedings{Paper9:HYLO:2002,
author = "Renata de Freitas and J. Viana and P. Veloso and S.
Veloso and M. Benevides",
title = "On Hybrid Arrow Logic",
crossref = "HYLO:2002",
pages = "53--66",
}
@InProceedings{Paper5:HYLO:2002,
author = "Bernhard Heinemann",
title = "Axiomatizing Modal Theories of Subset Spaces",
crossref = "HYLO:2002",
pages = "69--83",
}
@Proceedings{HYLO:2002,
editor = "Carlos E. Areces and Patrick Blackburn and Maarten
Marx and Ulrike Sattler",
title = "Hybrid Logics",
booktitle = "Hybrid Logics",
conference = "4th Workshop",
key = "HYLO",
pages = "83",
year = "2002",
month = jul # " 25",
venue = "Copenhagen, Denmark",
note = "Privately circulated proceedings",
}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ICLP - Logic Programming
% =================
@InProceedings{PierreWolper:ICLP:2002,
author = "Bernard Boigelot and Pierre Wolper",
title = "Representing Arithmetic Constraints with Finite
Automata: An Overview",
type = "Invited talk",
crossref = "ICLP:2002",
pages = "1--19",
abstract = "Linear numerical constraints and their first-order
theory, whether defined over the reals or the integers,
are basic tools that appear in many areas of Computer
Science. This paper overviews a set of techniques based
on finite automata that lead to decision procedures and
other useful algorithms, as well as to a normal form,
for the first-order linear theory of the integers, of
the reals, and of the integers and reals combined. This
approach has led to an implemented tool, which has the
so far unique capability of handling the linear
first-order theory of the integers and reals
combined.",
}
@InProceedings{StefanDecker:ICLP:2002,
author = "Stefan Decker",
title = "Logic Databases on the Semantic Web: Challenges and
Opportunities",
type = "Invited talk",
crossref = "ICLP:2002",
pages = "20--21",
abstract = "Until now, the Web has mainly been designed for direct
human consumption. The next step in the evolution,
dubbed the ``Semantic Web'', aims at
machine-processable information, enabling intelligent
services such as information brokers, search agents,
information filters, and direct B2B communication,
which offers greater functionality and interoperability
than the current stand-alone services. \\
The development of the Semantic Web creates
opportunities and challenges for logic databases.
Languages need to be developed which allow us to
specify data transformations for various data models.
Efficient query and inference techniques have to be
developed. Technologies developed by the Logic
Programming and Deductive Database Community can help
to build the Semantic Web by overcoming these
problems.",
}
@InProceedings{21:ICLP:2002,
author = "Alessandra Russo and Rob Miller and Bashar Nuseibeh
and Jeff Kramer",
title = "An Abductive Approach for Analysing Event-Based
Requirements Specifications",
crossref = "ICLP:2002",
pages = "22--37",
abstract = "We present a logic and logic programming based
approach for analysing event-based requirements
specifications given in terms of a system's reaction to
events and safety properties. The approach uses a
variant of Kowalski and Sergot's Event Calculus to
represent such specifications declaratively and an
abductive reasoning mechanism for analysing safety
properties. Given a system description and a safety
property, the abductive mechanism is able to identify a
complete set of counterexamples (if any exist) of the
property in terms of symbolic ``current'' states and
associated event-based transitions. A case study of an
automobile cruise control system specified in the SCR
framework is used to illustrate our approach. The
technique described is implemented using existing tools
for abductive logic programming.",
}
@InProceedings{65:ICLP:2002,
author = "Tom Schrijvers and Mar{\'\i}a Garc{\'\i}a de la Banda
and Bart Demoen",
title = "Trailing Analysis for {HAL}",
crossref = "ICLP:2002",
pages = "38--53",
abstract = "The HAL language includes a Herbrand constraint solver
which uses Taylor's PARMA scheme rather than the
standard WAM representation. This allows HAL to
generate more efficient Mercury code. Unfortunately,
PARMA's variable representation requires value trailing
with a trail stack consumption about twice as large as
for the WAM. We present a trailing analysis aimed at
determining which Herbrand variables do not need to be
trailed. The accuracy of the analysis comes from HAL's
semi-optional determinism and mode declarations. The
analysis has been partially integrated in the HAL
compiler and benchmark programs show good speed-up.",
}
@InProceedings{123:ICLP:2002,
author = "Steve Barker",
title = "Access Control for Deductive Databases by Logic
Programming",
crossref = "ICLP:2002",
pages = "54--69",
abstract = "We show how logic programs may be used to protect
deductive databases from the unauthorized retrieval of
positive and negative information, and from
unauthorized insert and delete requests. To achieve
this protection, a deductive database is expressed in a
form that is guaranteed to permit only authorized
access requests to be performed. The protection of the
positive information that may be retrieved from a
database and the information that may be inserted are
treated in a uniform way as is the protection of the
negative information in the database, and the
information that may be deleted.",
}
@InProceedings{35:ICLP:2002,
author = "Michael Thielscher",
title = "Reasoning about actions with {CHR}s and Finite Domain
Constraints",
crossref = "ICLP:2002",
pages = "70--84",
abstract = "We present a CLP-based approach to reasoning about
actions in the presence of incomplete states.
Constraints expressing negative and disjunctive state
knowledge are processed by a set of special Constraint
Handling Rules. In turn, these rules reduce to standard
finite domain constraints when handling variable
arguments of single state components. Correctness of
the approach is proved against the general action
theory of the Fluent Calculus. The constraint solver is
used as the kernel of a high-level programming language
for agents that reason and plan. Experiments have shown
that the constraint solver exhibits excellent
computational behavior and scales up well.",
}
@InProceedings{67:ICLP:2002,
author = "Alexander Bockmayr and Arnaud Courtois",
title = "Using hybrid concurrent constraint programming to
model dynamic biological systems",
crossref = "ICLP:2002",
pages = "85--99",
abstract = "Systems biology is a new area in biology that aims at
achieving a systems-level understanding of biological
systems. While current genome projects provide a huge
amount of data on genes or proteins, lots of research
is still necessary to understand how the different
parts of a biological system interact in order to
perform complex biological functions. Computational
models that help to analyze, explain or predict the
behavior of biological systems play a crucial role in
systems biology. The goal of this paper is to show that
hybrid concurrent constraint programming
(Gupta/Jagadeesan/Saraswat 98) may be a promising
alternative to existing modeling approaches in systems
biology. Hybrid cc is a declarative compositional
programming language with a well-defined semantics. It
allows one to model and simulate the dynamics of hybrid
systems, which exhibit both discrete and continuous
change. We show that Hybrid cc can be used naturally to
model a variety of biological phenomena, such as
reaching thresholds, kinetics, gene interaction or
biological pathways.",
}
@InProceedings{82:ICLP:2002,
author = "Giridhar Pemmasani and C. R. Ramakrishnan and I. V.
Ramakrishnan",
title = "Efficient Real-Time Model Checking using Tabled Logic
Programming and Constraints",
crossref = "ICLP:2002",
pages = "100--114",
abstract = "Logic programming based tools for real-time model
checking are beginning to emerge. In a previous work we
had demonstrated the feasibility of building such a
model checker by combining constraint processing and
tabulation. But efficiency and practicality of such a
model checker were not adequately addressed. In this
paper we describe XMC/dbm, an efficient model checker
for real-time systems using tabling. Performance gains
in XMC/dbm directly arise from the use of a lightweight
constraint solver combined with tabling. Specifically
the timing constraints are represented by difference
bound matrices which are encoded as Prolog terms.
Operations on these matrices, the dominant component in
real-time model checking, are shared using tabling. We
provide experimental evidence that the performance of
XMC/dbm is considerably better than our previous
real-time model checker and is highly competitive to
other well known real-time model checkers implemented
in C/C++. An important aspect of XMC/dbm is that it can
handle verification of systems consisting of untimed
components with performance comparable to verification
systems built specifically for untimed systems.",
}
@InProceedings{81:ICLP:2002,
author = "Witold Charatonik and Supratik Mukhopadhyay and
Andreas Podelski",
title = "Constraint-Based Infinite Model Checking and
Tabulation for Stratified {CLP}",
crossref = "ICLP:2002",
pages = "115--129",
abstract = "We consider Gottlob, Gr{\"a}del and Veith's
translation of branching time logic into {Datalog} LITE
where \emph{global} model checking amounts to
\emph{bottom-up} query evaluation. We define the
\emph{safe} branching time logic $S\mu$ that yields the
fragment Datalog LIT under this translation, and
investigate an alternative evaluation strategy for this
fragment. Datalog LIT corresponds to Stratified CLP
when we represent the transition relation of an
infinite-state system by a CLP program. We give a
tabulation procedure for the \emph{top-down} evaluation
of stratified CLP programs and thus obtain a
constraint-based \emph{local} model checking procedure
for~$S\mu$.",
}
@InProceedings{108:ICLP:2002,
author = "Hasan M. Jamil and Gillian Dobbie",
title = "A Model Theoretic Semantics for Multi-Level Secure
Deductive Databases",
crossref = "ICLP:2002",
pages = "130--147",
abstract = "The impetus for our current research is the need to
provide an adequate framework for {\em belief
reasoning} in multi-level secure (MLS) databases. We
demonstrate that a prudent application of the concept
of {\em inheritance} in a deductive database setting
will help capture the notion of {\em declarative
belief} and belief reasoning in MLS databases in an
elegant way. In this paper, we show that these concepts
can be captured in a F-logic style declarative query
language, called {\em MultiLog}, for MLS deductive
databases for which a model theoretic semantics exists.
This development is significant from a database
perspective as it now enables us to compute the
semantics of MultiLog databases in a bottom-up fashion.
The semantics developed here is reminiscent of the
stable model semantics of logic programs with negation.
We also define a bottom-up procedure to compute unique
models of stratified MultiLog databases. Finally, we
also establish the equivalence of MultiLog's three
logical characterizations -- model theory, fixpoint
theory and proof theory.",
}
@InProceedings{115:ICLP:2002,
author = "Michael Maher",
title = "Propagation Completeness of Reactive Constraints",
crossref = "ICLP:2002",
pages = "148--162",
abstract = "We develop a framework for addressing correctness and
timeliness-of-propagation issues for reactive
constraints - global constraints or user-defined
constraints that are implemented through constraint
propagation. The notion of propagation completeness is
introduced to capture timeliness of constraint
propagation. A generalized form of arc-consistency is
formulated which unifies many local consistency
conditions in the literature. We show that propagation
complete implementations of reactive constraints
achieve this arc-consistency when propagation quiesces.
Finally, we use the framework to state and prove an
impossibility result: that CHR cannot implement a
common relation with a desirable degree of timely
constraint propagation.",
}
@InProceedings{72:ICLP:2002,
author = "Henning Makholm and Kostis Sagonas",
title = "On Enabling the {WAM} with Region Support",
crossref = "ICLP:2002",
pages = "163--178",
abstract = "Region-based memory management is an attractive
alternative to garbage collection. It relies on a
compile-time analysis to annotate the program with
explicit allocation and deallocation instructions,
where lifetimes of memory objects are grouped together
in \emph{regions}. This paper investigates how to adapt
the runtime part of region-based memory management to
the WAM setting. We present additions to the memory
architecture and instruction set of the WAM that are
necessary to implement regions. We extend an optimized
WAM-based Prolog implementation with a region-based
memory manager which supports backtracking with instant
reclamation, and cuts. The performance of region-based
execution is compared with that of the baseline
garbage-collected implementation on several benchmark
programs. A region-enabled WAM performs competitively
and often results in time and/or space improvements.",
}
@InProceedings{77:ICLP:2002,
author = "Bart Demoen",
title = "A Different Look at Garbage Collection for the {WAM}",
crossref = "ICLP:2002",
pages = "179--193",
abstract = "A non-algorithmic approach to garbage collection for
the WAM heap is developed. A set of garbage collections
compatible with the WAM is specified in two steps: the
first step makes the useful data for each continuation
private and ensures that only useful terms survive
garbage collection. The second step completes garbage
collection by extending the intuitive notion of folding
of identical structures. The role of the trail in the
folding process is crucial and it is shown for the
ordinary WAM trail as well as for a value trail. New
and unexpected opportunities for recovering memory are
discovered to be compatible with this view of garbage
collection. This approach leads to better understanding
of the usefulness logic in the WAM, it is a good start
for the formal specification of the garbage collection
process and it shows a potential for new compile time
analyses that can improve run time memory management.
Choice point trimming is used as a vehicle to show
selective liveness of data, so its relation to the more
common stack maps is established.",
}
@InProceedings{75:ICLP:2002,
author = "Bart Demoen and Phuong-Lan Nguyen and Ruben
Vandeginste",
title = "Copying Garbage Collection for the {WAM}: To Mark or
Not to Mark?",
crossref = "ICLP:2002",
pages = "194--208",
abstract = "Garbage collection by copying is becoming more and
more popular for Prolog. Copying requires a marking
phase in order to be safe: safeness means that the
to-space is guaranteed not to overflow. However, some
systems use a copying garbage collector without marking
prior to copying, and instead postpone the copying of
potentially unsafe cells. Such systems only collect
small portions of the heap and it is not clear whether
postponing works while collecting the whole heap.
Moreover, it is shown here that postponing does not
solve the problem in a fundamental way. Since marking
takes time, it is worth studying the tradeoffs
involved. These observations have prompted the
experimentation with a series of garbage collectors
based on copying without marking and without
postponing. In particular, variants were implemented
that are named {\em dangerous}, {\em optimistic} and
{\em cautious} copying which exhibit various degrees of
unsafeness. Versions of each have been implemented
based on recursive copying as in most implementations
of {\em copy\_term/2} and on the Cheney algorithm.
Performance on benchmarks suggests that large
performance gains can be obtained by skipping the
marking phase, that dangerous copying is still
relatively safe but can be costly, and that the
additional effort of cautious copying over optimistic
copying is not worth it. The optimistic collectors
based on recursive copying perform best and slightly
better than the ones based on Cheney. Cache performance
measurements back up the benchmark results.",
}
@InProceedings{44:ICLP:2002,
author = "Harald Ganzinger and David McAllester",
title = "Logical Algorithms",
crossref = "ICLP:2002",
pages = "209--223",
abstract = "It is widely accepted that many algorithms can be
concisely and clearly expressed as logical inference
rules. However, logic programming has been
inappropriate for the study of the running time of
algorithms because there has not been a clear and
precise model of the run time of a logic program. We
present a logic programming model of computation
appropriate for the study of the run time of a wide
variety of algorithms.",
}
@InProceedings{110:ICLP:2002,
author = "Joachim Schimpf",
title = "Logical Loops",
crossref = "ICLP:2002",
pages = "224--238",
abstract = "We present a concrete proposal for enhancing Prolog
and Prolog based Constraint Logic Programming languages
with a new language construct, the logical loop. This
is a shorthand notation for the most commonly used
recursive control structure: the iteration or tail
recursion. We argue that this enhancement fits well
with the existing language concepts, enhances
productivity and maintainability, and helps newcomers
to the language by providing concepts that are familiar
from many other programming languages. The language
extension is implemented and has been in everyday use
over several years within the ECLiPSe system.",
}
@InProceedings{24:ICLP:2002,
author = "Eric Martin and Phuong Nguyen and Arun Sharma and
Frank Stephan",
title = "Learning in Logic with {RichProlog}",
crossref = "ICLP:2002",
pages = "239--254",
abstract = "Deduction and induction are unified on the basis of a
generalized notion of logical consequence, having
classical first-order logic as a particular case.
RichProlog is a natural extension of Prolog rooted in
this generalized logic, in the same way as Prolog is
rooted in classical logic. Prolog can answer $\Sigma_1$
queries as a side effect of a deductive inference.
RichProlog can answer $\Sigma_1$ queries, $\Pi_1$
queries (as a side effect of an inductive inference),
and $\Sigma_2$ queries (as a side effect of an
inductive inference followed by a deductive inference).
RichProlog can be used to learn: a learning problem is
expressed as a usual logic program, supplemented with
data, and solved by asking a $\Sigma_2$ query. The
output is correct in the limit, \emph{i.e.}, when
sufficient data have been provided.",
}
@InProceedings{26:ICLP:2002,
author = "Fran{\c c}ois Bry and Sebastian Schaffert",
title = "Towards a Declarative Query and Transformation
Language for {XML} and Semistructured Data: Simulation
Unification",
crossref = "ICLP:2002",
pages = "255--270",
abstract = "The growing importance of XML as a data interchange
standard demands languages for data querying and
transformation. Since the mid-1990s, several such
languages have been proposed that are inspired from
functional languages (such as XSLT) and/or database
query languages (such as XQuery). This paper addresses
applying logic programming concepts and techniques to
designing a declarative, rule-based query and
transformation language for XML and semistructured
data. \\
The paper first introduces issues specific to XML and
semistructured data such as the necessity of flexible
``query terms'' and of ``construct terms''. Then, it is
argued that logic programming concepts are particularly
appropriate for a declarative query and transformation
language for XML and semistructured data. Finally, a
new form of unification, called ``simulation
unification'', is proposed for answering ``query
terms'', and it is illustrated on examples.",
}
@InProceedings{43:ICLP:2002,
author = "Brigitte Pientka",
title = "A Proof-Theoretic Foundation for Tabled Higher-Order
Logic Programming",
crossref = "ICLP:2002",
pages = "271--286",
abstract = "Higher-order logic programming languages such as {\em
Elf} extend first-order logic programming in two ways:
first-order terms are replaced with (dependently) typed
$\lambda$-terms and the body of clauses may contain
implication and universal quantification. In this
paper, we describe {\em tabled higher-order logic
programming} where some redundant computation is
eliminated by memoizing sub-computation and re-using
its result later. This work extends Tamaki and Sato's
search strategy based on memoization to the
higher-order setting. We give a proof-theoretic
characterization of tabling based on uniform proofs and
prove soundness of the resulting interpreter. Based on
it, we have implemented a prototype of a tabled logic
programming interpreter for {\em Elf}.",
}
@InProceedings{70:ICLP:2002,
author = "Sorin Craciunescu",
title = "Proving the Equivalence of {CLP} Programs",
crossref = "ICLP:2002",
pages = "287--301",
abstract = "This paper presents two proof systems for the
equivalence of programs. The language concerned is CLP
to which the universal quantifier is added
(CLP$\forall$). Both systems are based on first order
classical logic. \\
The first uses an induction rule and allows one to
prove that the set of finite successes of a program is
included in another program's corresponding set. The
second uses a coinduction rule for proving the
inclusion of the sets of infinite successes which
contain the finite successes. \\
Finally we show that the proof systems are equivalent
under some natural conditions.",
}
@InProceedings{71:ICLP:2002,
author = "Paola Bruscoli",
title = "A Purely Logical Account of Sequentiality in Proof
Search",
crossref = "ICLP:2002",
pages = "302--316",
abstract = "A strict correspondence between the proof-search space
of a logical formal system and computations in a simple
process algebra is established. Sequential composition
in the process algebra corresponds to a logical
relation in the formal system---in the sense our
approach is purely logical, no axioms or encodings are
involved. The process algebra is a minimal restriction
of CCS to parallel and sequential composition; the
logical system is a minimal extension of multiplicative
linear logic. This way we get the first purely logical
account of sequentiality in proof search. Since we
restrict attention to a small meaningful fragment,
which is then of very broad interest, our techniques
should become a common basis for several possible
extensions. In particular, we argue about this work
being the first step in a two-step research for
capturing most of CCS in a purely logical fashion.",
}
@InProceedings{50:ICLP:2002,
author = "Katsumi Inoue and Chiaki Sakama",
title = "Disjunctive Explanations",
crossref = "ICLP:2002",
pages = "317--332",
abstract = "Abductive logic programming has been widely used to
declaratively specify a variety of problems in AI
including updates in data and knowledge bases, belief
revision, diagnosis, causal theory, and default
reasoning. One of the most significant issues in
abductive logic programming is to develop a reasonable
method for {\em knowledge assimilation}, which
incorporates obtained explanations into the current
knowledge base. This paper offers a solution to this
problem by considering {\em disjunctive explanations\/}
whenever multiple explanations exist. Disjunctive
explanations are then to be assimilated into the
knowledge base so that the assimilated program
preserves all and only minimal answer sets from the
collection of all possible updated programs. We
describe a new form of abductive logic programming
which deals with disjunctive explanations in the
framework of {\em extended abduction}. The proposed
framework can be well applied to view updates in
disjunctive databases.",
}
@InProceedings{12:ICLP:2002,
author = "Piero A. Bonatti",
title = "Reasoning With Infinite Stable Models {II}:
Disjunctive Programs",
crossref = "ICLP:2002",
pages = "333--346",
abstract = "The class of \emph{finitary normal logic
programs}---identified recently, by the author---makes
it possible to reason effectively with function
symbols, recursion, and infinite stable models. These
features may lead to a full integration of the standard
logic programming paradigm with the answer set
programming paradigm. For all finitary programs, ground
goals are decidable, while nonground goals are
semidecidable. Moreover, the existing engines (that
currently accept only much more restricted programs)
can be extended to handle finitary programs by
replacing their front-ends and keeping their core
inference mechanism unchanged. In this paper, the
theory of finitary normal programs is extended to
disjunctive programs. More precisely, we introduce a
suitable generalization of the notion of finitary
program and extend all the results of the author's
previous work to this class. For this purpose, a
consistency result by Fages is extended from normal
programs to disjunctive programs. We also correct an
error occurring in the previous work.",
}
@InProceedings{19:ICLP:2002,
author = "Zbigniew Lonc and Miros{\l}aw Truszczy{\'n}ski",
title = "Computing Stable Models: Worst-Case Performance
Estimates",
crossref = "ICLP:2002",
pages = "347--362",
abstract = "We study algorithms for computing stable models of
propositional logic programs and derive estimates on
their worst-case performance that are asymptotically
better than the trivial bound of $O(m 2^n)$, where $m$
is the size of an input program and $n$ is the number
of its atoms. For instance, for programs, whose clauses
consist of at most two literals (counting the head) we
design an algorithm to compute stable models that works
in time $O(m\times 1.44225^n)$. We present similar
results for several broader classes of programs, as
well.",
}
@InProceedings{57:ICLP:2002,
author = "Yannis Dimopoulos and Andreas Sideris",
title = "Towards Local Search for Answer Sets",
crossref = "ICLP:2002",
pages = "363--377",
abstract = "Answer set programming has emerged as a new important
paradigm for declarative problem solving. It relies on
algorithms that compute the stable models of a logic
program, a problem that is, in the worst-case,
intractable. Although, local search procedures have
been successfully applied to a variety of hard
computational problems, the idea of employing such
procedures in answer set programming has received very
limited attention. \\
This paper presents several local search algorithms for
computing the stable models of a normal logic program.
They are all based on the notion of a {\em conflict
set}, but use it in different ways, resulting in
different computational behaviors. The algorithms are
inspired from related work in solving propositional
satisfiability problems, suitably adapted to the stable
model semantics. The paper also discusses how the
heuristic equivalence method, that has been proposed in
the context of propositional satisfiability, can be
used in systematic search procedures that compute the
stable models of logic programs.",
}
@InProceedings{29:ICLP:2002,
author = "Pedro Cabalar",
title = "A rewriting method for Well-Founded Semantics with
Explicit Negation",
crossref = "ICLP:2002",
pages = "378--392",
abstract = "We present a modification of Brass et al's
transformation-based method for the bottom-up
computation of well-founded semantics (WFS), in order
to cope with explicit negation, in the sense of
Alferes and Pereira's WFSX semantics. This variation
consists in the simple addition of two intuitive
transformations that guarantee the satisfaction of the
so-called {\em coherence principle}: whenever an
objective literal is founded, its explicit negation
must be unfounded. The main contribution is the proof
of soundness and completeness of the resulting method
with respect to WFSX. Additionally, by a direct
inspection on the method, we immediately obtain
results that help to clarify the comparison between
WFSX and regular WFS when dealing with explicit
negation.",
}
@InProceedings{48:ICLP:2002,
author = "Grigoris Antoniou and Michael Maher",
title = "Embedding Defeasible Logic into Logic Programs",
crossref = "ICLP:2002",
pages = "393--404",
abstract = "Defeasible reasoning is a simple but efficient
approach to nonmonotonic reasoning that has recently
attracted considerable interest and that has found
various applications. Defeasible logic and its variants
are an important family of defeasible reasoning
methods. So far no relationship has been established
between defeasible logic and mainstream nonmonotonic
reasoning approaches. \\
In this paper we establish close links to known
semantics of extended logic programs. In particular, we
give a translation of a defeasible theory $D$ into a
program $P(D)$. We show that under a condition of
decisiveness, the defeasible consequences of $D$
correspond exactly to the sceptical conclusions of
$P(D)$ under the stable model semantics. Without
decisiveness, the result holds only in one direction
(all defeasible consequences of $D$ are included in all
stable models of $P(D)$). If we wish a complete
embedding for the general case, we need to use the
Kunen semantics of $P(D)$, instead.",
}
@InProceedings{58:ICLP:2002,
author = "David Pearce and Vladimir Sarsakov and Torsten Schaub
and Hans Tompits and Stefan Woltran",
title = "A Polynomial Translation of Logic Programs with Nested
Expressions into Disjunctive Logic Programs:
Preliminary Report",
crossref = "ICLP:2002",
pages = "405--420",
abstract = "Nested logic programs have recently been introduced in
order to allow for arbitrarily nested formulas in the
heads and the bodies of logic program rules under the
answer sets semantics. Previous results show that
nested logic programs can be transformed into standard
(unnested) disjunctive logic programs in an elementary
way, applying the negation-as-failure operator to body
literals only. This is of great practical relevance
since it allows us to evaluate nested logic programs by
means of off-the-shelf disjunctive logic programming
systems, like \texttt{DLV}. However, it turns out that
this straightforward transformation results in an
exponential blow-up in the worst-case, despite the fact
that complexity results indicate that there is a
polynomial translation among both formalisms. In this
paper, we take up this challenge and provide a
polynomial translation of logic programs with nested
expressions into disjunctive logic programs. Moreover,
we show that this translation is modular and (strongly)
faithful. We have implemented both the straightforward
as well as our advanced transformation; the resulting
compiler serves as a front-end to \texttt{DLV} and is
publicly available on the Web.",
}
@InProceedings{28:ICLP:2002,
author = "Henrik B{\ae}rbak Christensen",
title = "Using Logic Programming to Detect Activities in
Pervasive Healthcare",
crossref = "ICLP:2002",
pages = "421--436",
abstract = "In this experience paper we present a case study in
using logic programming in a pervasive computing
project in the healthcare domain. An expert system is
used to detect healthcare activities in a pervasive
hospital environment where positions of people and
things are tracked. Based on detected activities an
activity-driven computing infrastructure provides
computational assistance to healthcare staff on mobile-
and pervasive computing equipment. Assistance range
from simple activities like fast log-in into the
electronic patient medical record system to complex
activities like signing for medicine given to specific
patients. We describe the role of logic programming in
the infrastructure and discuss the benefits and
problems of using logic programming in a pervasive
context.",
}
@InProceedings{97:ICLP:2002,
author = "Kung-Kiu Lau and Michel Vanden Bossche",
title = "Logic Programming for Software Engineering: {A} Second
Chance",
crossref = "ICLP:2002",
pages = "437--451",
abstract = "Current trends in Software Engineering and
developments in Logic Programming lead us to believe
that there will be an opportunity for Logic Programming
to make a breakthrough in Software Engineering. In this
paper, we explain how this has arisen, and justify our
belief with a real-life application. Above all, we
invite fellow workers to take up the challenge that the
opportunity offers.",
}
@InProceedings{96:ICLP:2002,
author = "Tam{\'a}s Benk{\H o} and P{\'e}ter Krauth and
P{\'e}ter Szeredi",
title = "A Logic-based System for Application Integration",
crossref = "ICLP:2002",
pages = "452--466",
abstract = "The paper introduces the SILK tool-set, a tool-set
based on constraint logic programming techniques for
the support of application integration. We focus on the
Integrator component of SILK, which provides tools and
techniques to support the process of model evolution:
unification of the models of the information sources
and their mapping onto the conceptual models of their
user-groups. \\
We present the basic architecture of SILK and introduce
the SILK Knowledge Base, which stores the
meta-information describing the information sources.
The SILK Knowledge Base can contain both
object-oriented and ontology-based descriptions,
annotated with constraints. The constraints can be used
both for expressing the properties of the objects and
for providing mappings between them. We give a brief
introduction to SILan, the language for Knowledge Base
presentation and maintenance. We describe the
implementation status of SILK and give a simple
example, which shows how constraints and constraint
reasoning techniques can be used to support model
evolution.",
}
@InProceedings{15:ICLP:2002,
author = "Shilong Ma and Yuefei Sui and Ke Xu",
title = "The Limits of {H}orn Logic Programs",
type = "Poster",
crossref = "ICLP:2002",
pages = "467",
}
@InProceedings{17:ICLP:2002,
author = "Jes{\'u}s Medina and Enrique M{\'e}rida-Casermeiro and
Manuel Ojeda-Aciego",
title = "Multi-Adjoint Logic Programming: a Neural Net
Approach",
type = "Poster",
crossref = "ICLP:2002",
pages = "468",
}
@InProceedings{22:ICLP:2002,
author = "Claudio Vaucheret and Sergio Guadarrama and Susana
Mu{\~n}oz",
title = "Fuzzy {P}rolog: {A} Simple General Implementation
using {CLP($\mathcal{R}$)}",
type = "Poster",
crossref = "ICLP:2002",
pages = "469",
}
@InProceedings{25:ICLP:2002,
author = "Mireille Ducass{\'e} and Ludovic Langevine",
title = "Automated Analysis of {CLP(FD)} Program Execution
Traces",
type = "Poster",
crossref = "ICLP:2002",
pages = "470--471",
}
@InProceedings{31:ICLP:2002,
author = "Petr Olmer and Petr {\v S}t{\v e}p{\'a}nek",
title = "Schema-Based Transformations of Logic Programs in
$\lambda${P}rolog",
type = "Poster",
crossref = "ICLP:2002",
pages = "472",
}
@InProceedings{33:ICLP:2002,
author = "Yann Loyer and Umberto Straccia",
title = "Non-Uniform Hypothesis in Deductive Databases with
Uncertainty",
type = "Poster",
crossref = "ICLP:2002",
pages = "473--474",
}
@InProceedings{39:ICLP:2002,
author = "Nicos Angelopoulos",
title = "Probabilistic Finite Domains: {A} Brief Overview",
type = "Poster",
crossref = "ICLP:2002",
pages = "475",
}
@InProceedings{47:ICLP:2002,
author = "Prahladavaradan Sampath",
title = "Modelling Multi-Agent Reactive Systems",
type = "Poster",
crossref = "ICLP:2002",
pages = "476",
}
@InProceedings{51:ICLP:2002,
author = "Hisashi Hayashi and Kenta Cho and Akihiko Ohsuga",
title = "Integrating Planning, Action Execution, Knowledge
Updates and Plan Modifications via Logic Programming",
type = "Poster",
crossref = "ICLP:2002",
pages = "477",
}
@InProceedings{53:ICLP:2002,
author = "G{\'e}rard Ferrand and Arnaud Lallouet",
title = "A Logic Program Characterization of Domain Reduction
Approximations in Finite Domain {CSPs}",
type = "Poster",
crossref = "ICLP:2002",
pages = "478--479",
}
@InProceedings{62:ICLP:2002,
author = "Emmanuel Coquery and Fran{\c c}ois Fages",
title = "{TCLP}: overloading, subtyping and parametric
polymorphism made practical for {CLP}",
type = "Poster",
crossref = "ICLP:2002",
pages = "480",
}
@InProceedings{66:ICLP:2002,
author = "Henning Christiansen",
title = "Logical grammars based on constraint handling rules",
type = "Poster",
crossref = "ICLP:2002",
pages = "481",
}
@InProceedings{68:ICLP:2002,
author = "Mauricio Osorio and Juan Antonio Navarro and Jos{\'e}
Arrazola",
title = "Debugging in {A-Prolog}: {A} logical approach",
type = "Poster",
crossref = "ICLP:2002",
pages = "482--483",
}
@Proceedings{ICLP:2002,
editor = "Peter J. Stuckey",
title = "Logic Programming",
booktitle = "Logic Programming",
conference = "18th International Conference",
key = "ICLP",
year = "2002",
month = jul # " 29--" # aug # " 1",
venue = "Copenhagen, Denmark",
series = "Lecture Notes in Computer Science",
volume = "2401",
publisher = "Springer-Verlag",
ISBN = "3-540-43930-7",
ISSN = "0302-9743",
}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IMLA - Intuitionistic Modal Logic and Applications
% ==========================================
@InProceedings{AwodeyBauer:IMLA:2002,
author = "Steve Awodey and Andrej Bauer",
title = "Propositions as [types]",
crossref = "IMLA:2002",
pages = "1--15",
abstract = "We consider a modal operator [A] on types for erasing
computational content and formalizing a notion of proof
irrelevance. We give rules for such {\em bracket types}
in dependent type theory and provide complete semantics
using regular categories and topological models. We
also show how to interpret first-order logic in type
theory with brackets, and we make use of the
translation to compare type theory with intuitionistic
first-order logic.",
}
@InProceedings{Hermida:IMLA:2002,
author = "Claudio Hermida",
title = "A categorical outlook on relational modalities and
simulations",
crossref = "IMLA:2002",
pages = "17--34",
abstract = "We characterise bicategories of spans, relations and
partial maps universally in terms of factorisations
involving maps. We apply this characterisation to show
that the standard modalities $\Box$ and $\Diamond$
arise canonically as the extension of a predicate logic
from functions to (abstract) relations. With the
resulting fibrational interpretation of modalities, we
show how to deal with representabilitiy, thereby
deriving logical predicates for the power-object and
partial-map-classifier monads. In the second part of
the article, we exhibit an intrinsic relationship
between satisfaction of modal formulae (in a transition
system) and simulations, and apply it to exhibit the
role of the biclosed nature of the bicategory of
relations in proving that observational similarity
implies similarity.",
}
@InProceedings{Bellin:IMLA:2002,
author = "Gianluigi Bellin",
title = "Towards a formal pragmatics: An intuitionistic theory
of assertive and conjectural judgements with an
extension of {G}{\"o}del, {McKinsey} and {T}arski's
{S4} translation.",
crossref = "IMLA:2002",
pages = "35--68",
abstract = "Formal pragmatics extends classical logic to
characterise the logical properties of the operators of
illocutionary force such as those expressing assertions
and obligations. Here we consider the cases of
assertions and hypothetical (or conjectural)
judgements: for a mathematical proposition A, the
assertion of A is justified by the availability of a
proof of A, while conjectural assertion is justified by
the absence of a refutation of A. We give a unitary
sequent calculus with subsystems characterizing
intuitionistic and a fragment of classical reasoning
with such operators. Extending G{\"o}del's and
{McKinsey} and A. Tarski's translation of
intuitionistic logic into S4, we prove soundness and
completeness of our sequent calculus with respect to
the S4 semantics.",
}
@InProceedings{Brunet:IMLA:2002,
author = "Olivier Brunet",
title = "A Modal Logic for Observation-Based Knowledge
Representation",
crossref = "IMLA:2002",
pages = "69--81",
abstract = "In this paper we introduce and explore ways to include
a notion of partiality of information in knowledge
representation formalisms. This leads to the definition
of an algebraic structure based on the notion of
observation and partial representation, and study the
logical behaviour of those structures.",
}
@InProceedings{DevorenCoulthardMoorGoreNerode:IMLA:2002,
author = "J. M. Davoren and V. Coulthard and T. Moor and Rajeev
P. Gor{\'e} and A. Nerode",
title = "Topological semantics for Intuitionistic modal logics,
and spatial discretisation by {A/D} maps",
crossref = "IMLA:2002",
pages = "83--100",
abstract = "The contribution of this paper is threefold. First, we
take the well-known Intuitionistic modal logic of
Fischer-Servi with semantics in birelational Kripke
frames, and give the natural extension to topological
Kripke frames where the frame conditions relating the
Intuitionistic partial order with the modal relation
generalise to semi-continuity properties of the
relation with respect to the topology. Second, we
develop the theory of an interesting class of
topologies arising from spatial discretisation by
finitary covers; the motivating case is covers of
Euclidean space. We use the name ``A/D map'' to
designate covers of a space whose cover cells do not
generate any infinite descending chains; for
analog-to-digital conversion, where one seeks a
discretised view of a continuous world via the cells of
a cover, the limits of discernment should be finite.
Third, we give a novel application of Intuitionistic
semantics to the problem of approximate model-checking
of classical modal formulas in models where the exact
evaluation of denotation sets is not possible; such
models are the norm in applications of modal logics to
the formal analysis and design of hybrid (mixed
continuous and discrete) dynamical systems. The main
result of the paper is that for the positive fragment
of a modal language generated from a finite set of
atomic propositions, we can give general lower and
upper bounds on the classical denotation set of a
formula in a given model. Moreover, these bounds are
the Intuitionistic denotation sets of the same formula
in two different models, where the lower and upper
Intuitionistic models are built from an A/D map and
have finitary quotients.",
}
@InProceedings{MaiettiRitter:IMLA:2002,
author = "Maria Emilia Maietti and Eike Ritter",
title = "Modal Run-Time Analysis Revisited",
crossref = "IMLA:2002",
pages = "101--102",
abstract = "To perform run-time analysis we present an implicit
modal type system as a variation of Davies and
Pfenning's modal one. We replace the use of the
modality by adding a new arrow type to abstract on a
compile-time assumption. For it we provide both a
categorical semantics and an operational semantics with
a two stage evaluation.",
}
@Proceedings{IMLA:2002,
editor = "Michael Mendler and Rajeev P. Gor{\'e} and Valeria de
Paiva",
title = "Intuitionistic Modal Logic and Applications",
booktitle = "Intuitionistic Modal Logic and Applications",
conference = "Preliminary proceedings",
key = "IMLA",
year = "2002",
month = jul # " 26",
venue = "Copenhagen, Denmark",
series = "DIKU technical reports",
volume = "02-15",
institution = "University of Copenhagen, Dept.~of Computer Science",
ISSN = "0107-8283",
}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% LFM - Logical Frameworks and Meta-Languages
% =====================================
@InProceedings{LopezPimentelHodasPolakowStoilova:LFM:2002,
author = "Pablo L{\'o}pez and Ernesto Pimentel and Joshua S.
Hodas and Jeffrey Polakow and Lubomira Stoilova",
title = "Isolating Resource Consumption in Linear-Logic Proof
Search",
crossref = "LFM:2002",
pages = "1--8",
abstract = "This work presents an extension of the Tag-Frame
resource management system previously developed by the
authors. The extended system is able to isolate the
consumption of a given goal/clause without incurring
extra runtime costs. We believe this feature may help
in debugging linear logic programs and specifications
in a proof-theoretic setting.",
}
@InProceedings{VanderwaartCrary:LFM:2002,
author = "Joseph C. Vanderwaart and Karl Crary",
title = "A Simplified Account of the Metatheory of Linear
{LF}",
crossref = "LFM:2002",
pages = "9--26",
abstract = "We present a variant of the linear logical framework
LLF that avoids the restriction that well-typed terms
be in pre-canonical form and adds $\lambda$-abstraction
at the level of families. We abandon the use of
$\beta$-conversion as definitional equality in favor of
a set of typed definitional equality judgments that
include rules for parallel conversion and
extensionality. We show type-checking is decidable by
giving an algorithm to decide definitional equality for
well-typed terms and showing the algorithm is sound and
complete. The algorithm and the proof of its
correctness are simplified by the fact that they apply
only to well-typed terms and may therefore ignore the
distinction between intuitionistic and linear
hypotheses.",
}
@InProceedings{StumpDill:LFM:2002,
author = "Aaron Stump and David L. Dill",
title = "Producing Proofs from an Arithmetic Decision Procedure
in Elliptical {LF}",
crossref = "LFM:2002",
pages = "27--40",
abstract = "Software that can produce independently checkable
evidence for the correctness of its output has received
recent attention for use in certifying compilers and
proof-carrying code. CVC (``a Cooperating Validity
Checker'') is a proof-producing validity checker for a
decidable fragment of first-order logic enriched with
background theories. This paper describes how proofs of
valid formulas are produced from the decision procedure
for linear real arithmetic implemented in CVC. It is
shown how extensions to LF which support proof rules
schematic in an arity (``elliptical'' rules) are very
convenient for this purpose.",
}
@InProceedings{RaamsdonkSeveri:LFM:2002,
author = "Femke van Raamsdonk and Paula Severi",
title = "Eliminating Proofs from Programs",
crossref = "LFM:2002",
pages = "41--58",
abstract = "This paper presents a step in the development of an
operational approach to program extraction in type
theory. In order to get a program from a lambda term,
the logical parts need to be removed. This is done by a
reduction relation $\rightarrow_{\epsilon}$. We study
the combination of $\beta$-reduction and
$\epsilon$-reduction, both in the setting of simply
typed lambda calculus and for pure type systems. In the
general setting the properties confluence, subject
reduction, and strong normalization are studied.",
}
@InProceedings{MomiglianoAmblerCrole:LFM:2002,
author = "Alberto Momigliano and Simon J. Ambler and Roy L.
Crole",
title = "A Hybrid Encoding of {Howe's} Method for Establishing
Congruence of Bisimilarity",
crossref = "LFM:2002",
pages = "59--72",
abstract = "We give a short description of Hybrid, a new tool for
automated theorem proving, which was recently
introduced by the authors. It provides a form of Higher
Order Abstract Syntax (HOAS) combined consistently with
induction and coinduction. We present a case study
illustrating the use of Hybrid for reasoning about the
lazy $\lambda$-calculus. In particular, we prove that
the standard notion of simulation is a precongruence.
Although such a proof is not new, the development is
non-trivial, and we attempt to illustrate the
advantages of using Hybrid, as well as some issues
which will be addressed as further work.",
}
@InProceedings{ScagnettoMiculan:LFM:2002,
author = "Ivan Scagnetto and Marino Miculan",
title = "{A}mbient {C}alculus and its Logic in the {C}alculus
of {I}nductive {C}onstructions",
crossref = "LFM:2002",
pages = "73--92",
abstract = "The Ambient Calculus has been recently proposed as a
model of mobility of agents in a dynamically changing
hierarchy of domains. In this paper, we describe the
implementation of the theory and metatheory of Ambient
Calculus and its modal logic in the Calculus of
Inductive Constructions. We take full advantage of
Higher-Order Abstract Syntax, using the \emph{Theory of
Contexts} as a fundamental tool for developing formally
the metatheory of the object system. Among others, we
have successfully proved a set of \emph{fresh
renamings} properties, and formalized the connection
between the Theory of Contexts and Gabbay-Pitts'
``new'' quantifier. As a feedback, we introduce a new
definition of satisfaction for the Ambients logic and
derive some of the properties originally assumed as
axioms in the Theory of Contexts.",
}
@InProceedings{Delahaye:LFM:2002,
author = "David Delahaye",
title = "A Proof Dedicated Meta-Language",
crossref = "LFM:2002",
pages = "93--106",
abstract = "We describe a proof dedicated meta-language, called
${\cal L}_{tac}$, in the context of the {\sf Coq} proof
assistant. This new layer of meta-language is quite
appropriate to write small and local automations.
${\cal L}_{tac}$ is essentially a small functional core
with recursors and powerful pattern-matching operators
for {\sf Coq} terms but also for proof contexts. As
${\cal L}_{tac}$ is not complete, we describe an
interface between ${\cal L}_{tac}$ and the full
programmable meta-language of the system ({\sf
Objective~Caml}), which is also the implementation
language. This interface is based on a quotation system
where we can use ${\cal L}_{tac}$'s syntax in {\sf ML}
files, and where it is possible to insert {\sf ML} code
in ${\cal L}_{tac}$ scripts by means of antiquotations.
In that way, the two meta-languages are not opposed and
we give an example where they fairly cooperate. Thus,
this shows that a {\sf LCF}-like system with a
two-level meta-language is completely realistic.",
}
@InProceedings{Pientka:LFM:2002,
author = "Brigitte Pientka",
title = "Memoization-based Proof Search in {LF}: An
Experimental Evaluation of a Prototype",
crossref = "LFM:2002",
pages = "107--120",
abstract = "{\em Elf} is a general meta-language for the
specification and implementation of logical systems in
the style of the logical framework LF. Proof search in
this framework is based on the operational semantics of
logic programming. In this paper, we discuss
experiments with a prototype for memoization-based
proof search for {\em Elf} programs. We compare the
performance of memoization-based proof search,
depth-first search and iterative deepening search using
two applications: 1) Bi-directional type-checker with
subtyping and intersection types, and 2) parsing of
formulas into higher-order abstract syntax. These
experiments indicate that memoization-based proof
search is a practical and overall more efficient
alternative to depth-first and iterative deepening
search.",
}
@InProceedings{SchurmannAutexier:LFM:2002,
author = "Carsten Sch{\"u}rmann and Serge Autexier",
title = "Towards Proof Planning for {${\cal M}_{\omega}^+$}",
crossref = "LFM:2002",
pages = "121--142",
abstract = "This paper describes the proof planning system
$\mathcal{P}_{\omega}^{+}$ for the meta theorem prover
for LF implemented in Twelf. The main contributions
include a formal system that approximates the flow of
information between assumptions and goals within a meta
proof, a set of inference rules to reason about those
approximations, and a soundness proof that guarantees
that the proof planner does not reject promising proof
states.",
}
@Proceedings{LFM:2002,
editor = "Frank Pfenning",
title = "Logical Frameworks and Meta-Languages",
booktitle = "Logical Frameworks and Meta-Languages",
conference = "Preliminary Proceedings of the 3rd International
Workshop",
key = "LFM",
pages = "iv+142",
year = "2002",
month = jul # " 26",
venue = "Copenhagen, Denmark",
URL = "http://www-2.cs.cmu.edu/~lfm02/papers/",
note = "Final version to be published in \textit{Electronic
Notes in Theoretical Computer Science} 70(2), Elsevier
Science",
}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% LICS - Logic in Computer Science
% =========================
@InProceedings{Shankar:FLoC:2002:FME+*LICS+RTA,
author = "Natarajan Shankar",
title = "Little Engines of Proof",
type = "Invited talk",
crossref = "LICS:2002",
pages = "3",
abstract = "The automated construction of mathematical proof is a
basic activity in computing. Since the dawn of the
field of automated reasoning, there have been two
divergent schools of thought. One school, best
represented by Alan Robinson's resolution method, is
based on simple uniform proof search procedures guided
by heuristics. The other school, pioneered by Hao Wang,
argues for problem-specific combinations of decision
and semi-decision procedures. While the former school
has been dominant in the past, the latter approach has
greater promise. In recent years, several high quality
inference engines have been developed, including
propositional satisfiability solvers, ground decision
procedures for equality and arithmetic, quantifier
elimination procedures for integers and reals, and
abstraction methods for finitely approximating problems
over infinite domains. We describe some of these
``little engines of proof'' and a few of the ways in
which they can be combined. We focus in particular on
the combination of ground decision procedures and their
use in automated verification. We conclude by arguing
for a modern reinterpretation and reappraisal of Hao
Wang's hitherto neglected ideas on inferential
analysis.",
}
@InProceedings{Lynch:LICS:2002,
author = "Christopher Lynch and Barbara Morawska",
title = "Automatic Decidability",
crossref = "LICS:2002",
pages = "7--16",
abstract = "We give a saturation procedure that takes a theory as
input, and returns a decision procedure for that theory
when it halts. In addition, it returns a bound on the
complexity of that theory: $O(n \lg(n))$ for some
theories (such as the theory of lists), polynomial for
other theories (including all equational theories that
halt), and simply exponential for other theories (such
as the theory of arrays).",
}
@InProceedings{Clarke:LICS:2002,
author = "Edmund M. Clarke and Somesh Jha and Yuan Lu and Helmut
Veith",
title = "Tree-like Counterexamples in Model Checking",
crossref = "LICS:2002",
pages = "19--29",
abstract = "Counterexamples for specification violations provide
engineers with important debugging information.
Although counterexamples are considered one of the main
advantages of model checking, state-of-the-art model
checkers are restricted to relatively simple
counterexamples, and surprisingly little research
effort has been put into counterexamples. In this
paper, we introduce a new general framework for
counterexamples. The paper has three main
contributions: \\
(i) We determine the general form of ACTL
counterexamples. To this end, we investigate the notion
of counterexample and show that a large class of
temporal logics beyond ACTL admits counterexamples with
a simple tree-like transition relation. We show that
the existence of tree-like counterexamples is related
to a universal fragment of extended branching time
logic based on omega-regular temporal operators. \\
(ii) We present new symbolic algorithms to generate
tree-like counterexamples for ACTL specifications.
\\
(iii) Based on tree-like counterexamples we extend the
abstraction refinement methodology developed recently
by Clarke et al. (CAV'2000) to full ACTL. This
demonstrates the conceptual simplicity and elegance of
tree-like counterexamples.",
}
@InProceedings{Peyronnet:LICS:2002,
author = "Sophie Laplante and Richard Lassaigne and Fr{\'e}d{\'e}ric
Magniez and Sylvain Peyronnet and Michel de Rougemont",
title = "Probabilistic abstraction for model checking: An
approach based on property testing",
crossref = "LICS:2002",
pages = "30--39",
abstract = "The goal of model checking is to verify the
correctness of a given program, on all of its inputs.
The main obstacle, in many cases, is the intractably
large size of the program's transition system. Property
testing is a randomized method to verify whether some
fixed property holds on individual inputs. This is done
by looking at a small random part of that input. We
join the strengths of both approaches by introducing a
new notion of probabilistic abstraction, and by
extending the framework of model checking to include
the use of these abstractions. Our abstractions map
transition systems associated with large graphs to
small transition systems associated with small random
subgraphs. This reduces the original transition system
to a family of small, even constant-size, transition
systems. We prove that with high probability,
``sufficiently'' incorrect programs will be rejected
(eps-robustness). We also prove that under a certain
condition (exactness), correct programs will never be
rejected (congruence). Our work applies to programs for
graph properties such as bipartiteness,
$k$-colorability, or any $\exists\forall$ first order
graph properties. Our main contribution is to show how
to apply the ideas of property testing to syntactic
programs for such properties. We give a concrete
example of an abstraction for a program for
bipartiteness. Finally, we show that the relaxation of
the test alone does not yield transition systems small
enough to use the standard model checking method. More
specifically, we prove, using methods from
communication complexity, that the OBDD size remains
exponential for approximate bipartiteness.",
}
@InProceedings{RepsLoginovSagiv:LICS:2002,
author = "Thomas Reps and Alexey Loginov and Mooly Sagiv",
title = "Semantic Minimization of 3-Valued Propositional
Formulae",
crossref = "LICS:2002",
pages = "40--51",
abstract = "This paper presents an algorithm for a non-standard
logic-minimization problem that arises in 3-valued
propositional logic. The problem is motivated by the
potential for obtaining better answers in applications
that use 3-valued logic. An answer of 0 or 1 provides
precise (definite) information; an answer of 1/2
provides imprecise (indefinite) information. By
replacing a formula phi with a ``better'' formula psi,
we may improve the precision of the answers obtained.
In this paper, we give an algorithm that always
produces a formula that is ``best'' (in a certain
well-defined sense).",
}
@InProceedings{Reynolds:LICS:2002,
author = "John C. Reynolds",
title = "{Separation Logic: A Logic for Shared Mutable Data
Structures}",
type = "Invited lecture",
crossref = "LICS:2002",
pages = "55--74",
abstract = "In joint work with Peter O'Hearn and others, based on
early ideas of Burstall, we have developed an extension
of Hoare logic that permits reasoning about low-level
imperative programs that use shared mutable data
structure.\\
The simple imperative programming language is extended
with commands (not expressions) for accessing and
modifying shared structures, and for explicit
allocation and deallocation of storage. Assertions are
extended by introducing a ``separating conjunction''
that asserts that its subformulas hold for disjoint
parts of the heap, and a closely related ``separating
implication''. Coupled with the inductive definition of
predicates on abstract data structures, this extension
permits the concise and flexible description of
structures with controlled sharing.\\
In this paper, we will survey the current development
of this program.",
}
@InProceedings{Ahmed:LICS:2002,
author = "Amal Ahmed and Andrew W. Appel and Roberto Virga",
title = "A Stratified Semantics of General References
Embeddable in Higher-Order Logic",
crossref = "LICS:2002",
pages = "75--86",
abstract = "We demonstrate a semantic model of general references
--- that is, mutable memory cells that may contain
values of any (statically-checked) closed type,
including other references. Our model is in terms of
execution sequences on a von Neumann machine; thus, it
can be used in a Proof-Carrying Code system where the
skeptical consumer checks even the proofs of the typing
rules. The model allows us to prove a frame-axiom
introduction rule that allows locality of specification
and reasoning, even in the event of updates to aliased
locations. Our proof is machine-checked in the Twelf
metalogic.",
}
@InProceedings{HamidShao:LICS:2002,
author = "Nadeem A. Hamid and Zhong Shao and Valery Trifonov and
Stefan Monnier and Zhaozhong Ni",
title = "A Syntactic Approach to Foundational Proof-Carrying
Code",
crossref = "LICS:2002",
pages = "89--100",
abstract = "Proof-Carrying Code (PCC) is a general framework for
verifying the safety properties of machine-language
programs. PCC proofs are usually written in a logic
extended with language-specific typing rules. In
Foundational Proof-Carrying Code (FPCC), on the other
hand, proofs are constructed and verified using
strictly the foundations of mathematical logic, with no
type-specific axioms. FPCC is more flexible and secure
because it is not tied to any particular type system
and it has a smaller trusted base. \\
Foundational proofs, however, are much harder to
construct. Previous efforts on FPCC all required
building sophisticated semantic models for types. In
this paper, we present a syntactic approach to FPCC
that avoids the difficulties of previous work. Under
our new scheme, the foundational proof for a typed
machine program simply consists of the typing
derivation plus the formalized syntactic soundness
proof for the underlying type system. We give a
translation from a typed assembly language into FPCC
and demonstrate the advantages of our new system via an
implementation in the Coq proof assistant.",
}
@InProceedings{JeffreyRathke:LICS:2002,
author = "Alan Jeffrey and Julian Rathke",
title = "A fully abstract may testing semantics for concurrent
objects",
crossref = "LICS:2002",
pages = "101--112",
abstract = "This paper provides a fully abstract semantics for a
variant of the concurrent object calculus. We define
may testing for concurrent object components and then
characterise it using a trace semantics inspired by UML
interaction diagrams. The main result of this paper is
to show that the trace semantics is fully abstract for
may testing. This is the first such result for a
concurrent object language.",
}
@InProceedings{ReusStreicher:LICS:2002,
author = "Bernhard Reus and Thomas Streicher",
title = "Semantics and Logic of Object Calculi",
crossref = "LICS:2002",
pages = "113--122",
abstract = "The main contribution of this paper is a formal
characterization of recursive object specifications
based on a denotational untyped semantics of Abadi \&
Cardelli's object calculus and the discussion of
existence of those (recursive) specifications. The
existence theorem uses domain theoretical machinery
known from the work of Freyd and Pitts. The semantics
is then applied to prove soundness of a programming
logic for the object calculus (by Abadi \& Leino) and
to suggest possible extensions. For the purposes of
this discussion we use an informal logic of predomains
in order to avoid any commitment to a special syntax of
specification logic.",
}
@InProceedings{PalsbergZhao:LICS:2002,
author = "Jens Palsberg and Tian Zhao",
title = "Efficient Type Inference for Record Concatenation and
Subtyping",
crossref = "LICS:2002",
pages = "125--136",
abstract = "Record concatenation, multiple inheritance, and
multiple-object cloning are closely related and part of
various language designs. For example, in Cardelli's
untyped Obliq language, a new object can be constructed
from several existing objects by cloning followed by
concatenation; an error is given in case of field name
conflicts. Type systems for record concatenation have
been studied by Wand, Harper and Pierce, R{\'e}my, and
others; and type inference for the combination of
record concatenation and subtyping has been studied by
Sulzmann and by Pottier. In this paper we present the
first polynomial-time type inference algorithm for
record concatenation, subtyping, and recursive types.
Our example language is the Abadi-Cardelli object
calculus extended with a concatenation operator. The
type inference algorithm runs in $O(n^5)$ time where
$n$ is the size of the program. Our algorithm enables
efficient type checking of Obliq programs without
changing the programs at all.",
}
@InProceedings{FrischCastagnaBenzaken:LICS:2002,
author = "Alain Frisch and Giuseppe Castagna and V{\'e}ronique
Benzaken",
title = "Semantic subtyping",
crossref = "LICS:2002",
pages = "137--146",
abstract = "Usually subtyping relations are defined either
syntactically by a formal system or semantically by an
interpretation of types in an untyped denotational
model. In this work we show how to define a subtyping
relation semantically, for a language whose operational
semantics is driven by types; we consider a rich type
algebra, with product, arrow, recursive, intersection,
union and complement types. Our approach is to
``bootstrap'' the subtyping relation through a notion
of set-theoretic model of the type algebra. The
advantages of the semantic approach are manifold.
Foremost we get ``for free'' many properties (e.g., the
transitivity of subtyping) that, with axiomatized
subtyping, would require tedious and error prone
proofs. Equally important is that the semantic approach
allows one to derive complete algorithms for the
subtyping relation or the propagation of types through
patterns. As the subtyping relation has a natural
(inasmuch as semantic) interpretation, the type system
can give informative error messages when static
type-checking fails. Last but not least the approach
has an immediate impact in the definition and the
implementation of languages manipulating XML documents,
as this was our original motivation.",
}
@InProceedings{BalatCosmoFiore:LICS:2002,
author = "Marcelo Fiore and Roberto Di Cosmo and Vincent Balat",
title = "Remarks on Isomorphisms in Typed Lambda Calculi with
Empty and Sum Types",
crossref = "LICS:2002",
pages = "147--156",
abstract = "Tarski asked whether the arithmetic identities taught
in high school are complete for showing all arithmetic
equations valid for the natural numbers. The answer to
this question for the language of arithmetic
expressions using a constant for the number one and the
operations of product and exponentiation is
affirmative, and the complete equational theory also
characterises isomorphism in the typed lambda calculus,
where the constant for one and the operations of
product and exponentiation respectively correspond to
the unit type and the product and arrow type
constructors. This paper studies isomorphisms in typed
lambda calculi with empty and sum types from this
viewpoint. We close an open problem by establishing
that the theory of type isomorphisms in the presence of
product, arrow, and sum types (with or without the unit
type) is not finitely axiomatisable. Further, we
observe that for type theories with arrow, empty and
sum types the correspondence between isomorphism and
arithmetic equality generally breaks down, but that it
still holds in some particular cases including that of
type isomorphism with the empty type and equality with
zero.",
}
@InProceedings{Statman:LICS:2002,
author = "Richard Statman",
title = "On the lambda {Y} calculus",
crossref = "LICS:2002",
pages = "159--166",
abstract = "We show that the word problem for the lambda Y
calculus is undecidable and related results.",
}
@InProceedings{FaellaTorreMurano:LICS:2002,
author = "Marco Faella and Salvatore La Torre and Aniello
Murano",
title = "Dense Real-time Games",
crossref = "LICS:2002",
pages = "167--176",
abstract = "The rapid development of complex and safety-critical
systems requires the use of reliable verification
methods and tools for system design (synthesis). Many
systems of interest are reactive, in the sense that
their behavior depends on the interaction with the
environment. A natural framework to model them is a
two-player game: the system versus the environment. In
this context, the central problem is to determine the
existence of a winning strategy according to a given
winning condition.\\
We focus on real-time systems, and choose to model the
related game as a nondeterministic timed automaton. We
express winning conditions by formulas of the
branching-time temporal logic TCTL. While timed games
have been studied in the literature, timed games with
dense-time winning conditions constitute a new research
topic.\\
The main result of this paper is an exponential-time
algorithm to check for the existence of a winning
strategy for TCTL games where equality is not allowed
in the timing constraints. Our approach consists in
translating to timed tree automata both the game graph
and the winning condition, thus reducing the considered
decision problem to the emptiness problem for this
class of automata. The proposed algorithm matches the
known lower bound on timed games. Moreover, if we relax
the limitation we have placed on the timing
constraints, the problem becomes undecidable.",
}
@InProceedings{Dima:LICS:2002,
author = "Catalin Dima",
title = "Computing reachability relations in timed automata",
crossref = "LICS:2002",
pages = "177--186",
abstract = "We give an algorithmic calculus of the reachability
relations on clock values defined by timed automata.
Our approach is a modular one, by computing unions,
compositions and reflexive-transitive closure (star) of
``atomic'' relations. The essential tool is a new
representation technique for n-clock relations -- the
2n-automata -- and our strategy is to show the closure
under union, concatenation and star of the class of
2n-automata that represent reachability relations in
timed automata.",
}
@InProceedings{Gottlob:LICS:2002,
author = "Georg Gottlob and Christoph Koch",
title = "{Monadic Queries over Tree-Structured Data}",
type = "Invited lecture",
crossref = "LICS:2002",
pages = "189--202",
}
@InProceedings{BenediktLibkin:LICS:2002,
author = "Michael Benedikt and Leonid Libkin",
title = "Tree Extension Algebras: Logics, Automata, and Query
Languages",
crossref = "LICS:2002",
pages = "203--212",
abstract = "We study relations on trees defined by first-order
constraints over a vocabulary that includes the {\it
tree extension relation} $T \prec T'$, holding if and
only if every branch of $T$ extends to a branch of
$T'$, unary node-tests, and a binary relation checking
if the domains of two trees are equal. We show that
from such a formula one can generate a tree automaton
that accepts the set of tuples of trees defined by the
formula, and conversely that every automaton over
tree-tuples is captured by such a formula. We look at
the fragment with only extension inequalities and leaf
tests, and show that it corresponds to a new class of
automata on tree tuples, which is strictly weaker than
general tree-tuple automata. We use the automata
representations to show separation and expressibility
results for formulae in the logic. We then turn to
relational calculi over the logic defined here: that
is, from constraints we extend to queries that have
second-order parameters for a finite set of tree
tuples. We give normal forms for queries, and use these
to get bounds on the data complexity of query
evaluation, showing that while general query evaluation
is unbounded within the polynomial hierarchy, generic
query evaluation has very low complexity, giving strong
bounds on the expressive power of relational calculi
with tree extension constraints. We also give normal
forms for safe queries in the calculus.",
}
@InProceedings{FrickGrohe:LICS:2002,
author = "Markus Frick and Martin Grohe",
title = "The complexity of first-order and monadic second-order
logic revisited",
crossref = "LICS:2002",
pages = "215--224",
abstract = "The model-checking problem for a logic L on a class C
of structures asks whether a given L-sentence holds in
a given structure in C. In this paper, we give
super-exponential lower bounds for fixed-parameter
tractable model-checking problems for first-order and
monadic second-order logic. \\
We show that unless PTIME = NP, the model-checking
problem for monadic second-order logic on finite words
is not solvable in time $f(k)p(n)$, for any elementary
function $f$ and any polynomial $p$. Here $k$ denotes
the size of the input sentence and $n$ the size of the
input word. We prove the same result for first-order
logic under a stronger complexity theoretic assumption
from parameterized complexity theory. \\ Furthermore,
we prove that the model-checking problems for
first-order logic on structures of degree $2$ and of
bounded degree $d$ greater or equal $3$ are not
solvable in time $2^{2^{o(k)}} p(n)$ (for degree $2$)
and $2^{2^{2^{o(k)}}} p(n)$ (for degree $d$), for any
polynomial $p$, again under an assumption from
parameterized complexity theory. We match these lower
bounds by corresponding upper bounds.",
}
@InProceedings{Bars:LICS:2002,
author = "Jean-Marie Le Bars",
title = "The 0-1 law fails for frame satisfiability of
propositional modal logic",
crossref = "LICS:2002",
pages = "225--234",
abstract = "The digraph property KERNEL is a very simple and
well-known property studied in various areas. We
previously defined a variant of this property as a
counterexample of 0-1 law for the monadic existential
second order logic with at most two first-order
variables, over structures with 16 binary relations.
Goranko and Kapron have defined two variants in frames
which expresses frame satisfiability of propositional
modal logic, also expressible in a small fragment of
the logic above over structures with only one relation.
\\
We propose another variant of KERNEL which provides a
counterexample of the 0-1 law for frame satisfiability
of propositional modal logic. This refutes a result by
Halpern and Kapron which establishes that the 0-1 law
holds for this logic. It also strongly refines our
previous counterexample.",
}
@InProceedings{IshiharaKhoussainovRubin:LICS:2002,
author = "Hajime Ishihara and Bakhadyr Khoussainov and Sasha
Rubin",
title = "Some results on automatic structures",
crossref = "LICS:2002",
pages = "235--242",
abstract = "We study the class of countable structures which can
be presented by synchronous finite automata. We reduce
the problem of existence of an automatic presentation
of a structure to that for a graph. We exhibit a series
of properties of automatic equivalence structures,
linearly ordered sets and permutation structures. These
serve as a first step in producing practical
descriptions of some automatic structures or
illuminating the complexity of doing so for others.",
}
@InProceedings{Ong:LICS:2002,
author = "C.-H. L. Ong",
title = "Observational equivalence of 3rd-order Idealized
{A}lgol is decidable",
crossref = "LICS:2002",
pages = "245--256",
abstract = "We prove that observational equivalence of 3rd-order
finitary Idealized Algol (IA) is decidable using Game
Semantics. By modelling state explicitly in our games,
we show that the denotation of a term $M$ of this
fragment of IA (built up from finite base types) is a
compactly innocent strategy-with-state i.e. the
strategy is generated by a finite view function $f_M$.
Given any such $f_M$, we construct a real-time
deterministic pushdown automaton (DPDA) that recognizes
the complete plays of the knowing-strategy denotation
of $M$. Since such plays characterize observational
equivalence, and there is an algorithm for deciding
whether any two DPDAs recognize the same language, we
obtain a procedure for deciding observational
equivalence of 3rd-order finitary IA. This algorithmic
representation of program meanings, which is
compositional, provides a foundation for model-checking
a wide range of behavioural properties of IA and other
cognate programming languages. Another result concerns
2nd-order IA with full recursion: we show that
observational equivalence for this fragment is
undecidable.",
}
@InProceedings{HylandSchalk:LICS:2002,
author = "Martin Hyland and Andrea Schalk",
title = "Games on Graphs and Sequentially Realizable
Functionals",
crossref = "LICS:2002",
pages = "257--264",
abstract = "We present a new category of games on graphs and
derive from it a model for Intuitionistic Linear Logic.
Our category has the computational flavour of concrete
data structures but embeds fully and faithfully in an
abstract games model. It differs markedly from the
usual Intuitionistic Linear Logic setting for
sequential algorithms. However, we show that with a
natural exponential we obtain a model for PCF
essentially equivalent to the sequential algorithms
model. We briefly consider a more extensional setting
and the prospects for a better understanding of the
Longley Conjecture.",
}
@InProceedings{Laurent:LICS:2002,
author = "Olivier Laurent",
title = "Polarized games",
crossref = "LICS:2002",
pages = "265--274",
abstract = "We generalize the intuitionistic Hyland-Ong games to a
notion of polarized games allowing games with plays
starting by proponent moves. The usual constructions on
games are adjusted to fit this setting yielding a game
model for polarized linear logic with a definability
result. As a consequence this gives a complete game
model for various classical systems: LC, lambda-mu
calculus, ... for both call-by-name and call-by-value
evaluations.",
}
@InProceedings{EdalatLieutier:LICS:2002,
author = "Abbas Edalat and Andr{\'e} Lieutier",
title = "Domain Theory and Differential Calculus (Functions of
one variable)",
crossref = "LICS:2002",
pages = "277--286",
abstract = "We introduce a data type for differentiable functions
in the framework of domain theory. Using a new
structure, called tie of maps, which provides finitary
information about the differential properties of a
Scott continuous map, we define the derivative of a
Scott continuous function on the domain of intervals,
which is itself a Scott continuous function. This leads
to a domain-theoretic generalization of the fundamental
theorem of calculus. The central part of this work is
to construct a domain for differentiable real valued
functions of a real variable. The classical $C^1$
functions, equipped with its $C^1$ norm, is embedded
into the set of maximal elements of this domain, which
is a countably based bounded complete continuous domain
with an effective structure. The construction can be
generalized to $C^k$ and $C^\infty$ functions and, in
future, to functions of several variables and analytic
functions. While the question of computability for
differentiable functions has been studied in the
literature, this seems to be the first time that a
proper data type for differential calculus is proposed
which brings smooth mathematics in the realm of domain
theory and type theory. As an immediate application, we
present a domain-theoretic and effective generalization
of Picard's theorem, which provides a data type and an
algorithm for solving differential equations given by a
vector field and an initial condition. At each step of
computation of this algorithm, one gets an
approximation which is an interval piecewise polynomial
function with rational coefficients that provides
precise information on the solution.",
}
@InProceedings{Simpson:LICS:2002,
author = "Alex Simpson",
title = "Computational Adequacy for Recursive Types in Models
of Intuitionistic Set Theory",
crossref = "LICS:2002",
pages = "287--298",
abstract = "We present a general axiomatic construction of models
of FPC, a recursively typed lambda-calculus with
call-by-value operational semantics. Our method of
construction is to obtain such models as full
subcategories of categorical models of intuitionistic
set theory. This allows us to obtain a notion of model
that encompasses both domain-theoretic and
realizability models. We show that the existence of
solutions to recursive domain equations, needed for the
interpretation of recursive types, depends on the
strength of the set theory. The internal set theory of
an elementary topos is not strong enough to guarantee
their existence. However, solutions to recursive domain
equations do exist if models of intuitionistic
Zermelo-Fraenkel set theory are used instead. We apply
this result to interpret FPC, and we provide necessary
and sufficient conditions on a model for the
interpretation to be computationally adequate, i.e. for
the operational and denotational notions of termination
to agree.",
}
@InProceedings{Varacca:LICS:2002,
author = "Daniele Varacca",
title = "The powerdomain of indexed valuations",
crossref = "LICS:2002",
pages = "299--308",
abstract = "This paper is about combining nondeterminism and
probabilities. We study this phenomenon from a domain
theoretic point of view. In domain theory,
nondeterminism is modeled using the notion of
powerdomain, while probability is modeled using the
powerdomain of valuations. Those two functors do not
combine well, as they are. We define the notion of
powerdomain of indexed valuations, which can be
combined nicely with the usual nondeterministic
powerdomain. We show an equational characterization of
our construction. Finally we discuss the computational
meaning of indexed valuations, and we show how they can
be used, by giving a denotational semantics of a simple
imperative language.",
}
@InProceedings{Cook:LICS:2002,
author = "Stephen A. Cook",
title = "{Complexity Classes, Propositional Proof Systems, and
Formal Theories}",
type = "Invited lecture",
crossref = "LICS:2002",
pages = "311",
}
@InProceedings{HesseImmerman:LICS:2002,
author = "William Hesse and Neil Immerman",
title = "Complete problems for Dynamic Complexity Classes",
crossref = "LICS:2002",
pages = "313--322",
abstract = "We present the first complete problems for dynamic
complexity classes including the classes Dyn-FO and
DynThC$^0$, the dynamic classes corresponding to
relational calculus and (polynomially bounded) SQL,
respectively. The first problem we show complete for
Dyn-FO is a single-step version of the circuit value
problem (SSCV). \\
Of independent interest, our construction also produces
a first-order formula, $\zeta$, that is in a sense
universal for all first-order formulas. Since
first-order formulas are stratified by quantifier
depth, the first-order formula $\zeta$ emulates
formulas of greater depth by iterated application. As a
corollary we obtain a fixed quantifier block, QBC, that
is complete for all first-order quantifier blocks.",
}
@InProceedings{Atserias:LICS:2002,
author = "Albert Atserias",
title = "Unsatisfiable Random Formulas are Hard to Certify",
crossref = "LICS:2002",
pages = "325--334",
abstract = "We prove that every property of 3CNF formulas that
implies unsatisfiability and is expressible in Datalog
has asymptotic probability zero when formulas are
randomly generated by taking $6n$ non-trivial clauses
of exactly three literals uniformly and independently.
Our result is a consequence of designing a winning
strategy for Duplicator in the existential $k$-pebble
game on the structure that encodes the 3CNF formula and
a fixed template structure encoding a satisfiable
formula. The winning strategy makes use of certain
extension axioms that we introduce and hold almost
surely on a random 3CNF formula. An interesting feature
of our result is that it brings the fields of
Propositional Proof Complexity and Finite Model Theory
together. To make this connection more explicit, we
show that Duplicator wins the existential pebble game
on the structure encoding the Pigeonhole Principle and
the template structure above. Moreover, we also prove
that there exists a $2k$-Datalog program expressing
that an input 3CNF formula has a Resolution refutation
of width $k$. As a consequence to our result and the
known size-width relationship in Resolution, we obtain
new proofs of the exponential lower bounds for
Resolution refutations of random 3CNF formulas and the
Pigeonhole Principle.",
}
@InProceedings{Soltys:LICS:2002,
author = "Michael Soltys and Stephen A. Cook",
title = "The proof complexity of Linear Algebra",
crossref = "LICS:2002",
pages = "335--344",
abstract = "We introduce three formal theories of increasing
strength for linear algebra in order to study the
complexity of the concepts needed to prove the basic
theorems of the subject. We give what is apparently the
first feasible proofs of the Cayley-Hamilton theorem
and other properties of the determinant, and study the
propositional proof complexity of matrix identities.",
}
@InProceedings{Leivant:LICS:2002,
author = "Daniel Leivant",
title = "Calibrating computational feasibility by abstraction
rank",
crossref = "LICS:2002",
pages = "345--354",
abstract = "We characterize computationally the functions provable
in second order logic with set existence restricted to
natural classes of first order formulas. A
classification of first-order set-existence by
implicational rank yields a natural hierarchy of
complexity classes within the class of
Kalmar-elementary functions: The functions over
$\{0,1\}^*$ constructively provable using set existence
for formulas of implicational rank $\leq k$ are
precisely the functions computable in deterministic
time $O({\textit exp}_k(n))$, where ${\textit exp}_0 =
\cup_k(\lambda n. n^k)$, and ${\textit exp}_{k+1} =
2^{{\textit exp}_k}$. (For $k > 0$ provability here is
by normal deductions, a demonstrably necessary
proviso.) In particular, set-existence for positive
formulas yields exactly PTime. We thus obtain lean and
natural formalisms for codifying feasible mathematics,
which are expressive both in allowing second order
definitions and reasoning, and in incorporating
equational programming and reasoning about program
convergence in a direct and uncoded style.\\ Through a
formula-as-type morphism, we also obtain a link with
lambda definability, which we exhibit in the full
paper: The functions over $\{0,1\}^*$ definable in the
polymorphic lambda calculus $F_2$ over a base of type
of words, using first-order type-arguments of rank
$\leq k$, are precisely the functions computable in
deterministic time $O({\textit exp}_k(n))$ (again, for
$k > 0$ definability is by normal $\lambda$-terms.) The
poly-time case was proved (directly) in D. Leivant/J.Y.
Marion, Lambda-calculus characterizations of poly-time,
Fund.Inform.19, pp. 167-184, 1993.",
}
@InProceedings{Lenzerini:LICS:2002,
author = "Diego Calvanese and Giuseppe De Giacomo and Maurizio Lenzerini",
title = "Description Logics: Foundations for Class-based
Knowledge Representation",
type = "Invited Tutorial",
crossref = "LICS:2002",
pages = "359--370",
}
@InProceedings{Otto:LICS:2002,
author = "Martin Otto",
title = "Modal and guarded characterisation theorems over
finite transition systems",
crossref = "LICS:2002",
pages = "371--380",
abstract = "Characterisation theorems for modal and guarded
fragments of first-order logic are explored over finite
transition systems. We show that the classical
characterisations in terms of semantic invariance under
the appropriate forms of bisimulation equivalence can
be recovered at the level of finite model theory. The
new, more constructive proofs naturally extend to
alternative proofs of the classical variants. A finite
model theory version of van Benthem's characterisation
theorem for basic modal logic, which similarly sheds
new light on the classical version, is due to E.Rosen.
That proof is simplified and the result slightly
strengthened in terms of quantitative bounds with the
new approach presented here. The main thrust of the
present paper, however, lies in a uniform treatment
that extends to incorporate universal and inverse
modalities and guarded quantification over transition
systems (width two relational structures). Apart from
first-order locality (specific ramifications of
Gaifman's theorem for bisimulation invariant formulae)
our treatment rests on a natural construction of
finite, locally acyclic covers for finite transition
systems. These covers can serve as finite analogues of
tree unravellings in providing local control over
first-order logic in finite bisimilar companion
structures.",
}
@InProceedings{Schnoebelen:LICS:2002,
author = "Fran{\c c}ois Laroussinie and Nicolas Markey and
Philippe Schnoebelen",
title = "Temporal logic with forgettable past",
crossref = "LICS:2002",
pages = "383--392",
abstract = "We investigate NLTL, a linear-time temporal logic with
forgettable past. NLTL can be exponentially more
succinct than LTL+Past (which in turn can be more
succinct than LTL). We study satisfiability and model
checking for NLTL and provide optimal
automata-theoretic algorithms for these
EXPSPACE-complete problems.",
}
@InProceedings{Hodkinson:LICS:2002,
author = "Ian Hodkinson and Frank Wolter and Michael
Zakharyaschev",
title = "Decidable and undecidable fragments of first-order
branching temporal logics",
crossref = "LICS:2002",
pages = "393--402",
abstract = "In this paper we analyze the decision problem for
fragments of first-order extensions of branching time
temporal logics such as computational tree logics CTL
and CTL* or Prior's Ockhamist logic of historical
necessity. On the one hand, we show that the
one-variable fragments of logics like first-order CTL*
- such as the product of propositional CTL* with simple
propositional modal logic S5, or even the one-variable
bundled first-order temporal logic with sole temporal
operator `some time in the future' - are undecidable.
On the other hand, it is proved that by restricting
applications of first-order quantifiers to state (i.e.,
path-independent) formulas, and applications of
temporal operators and path quantifiers to formulas
with at most one free variable, we can obtain decidable
fragments. The same arguments show decidability of
`non-local' propositional CTL*, in which truth values
of propositional atoms depend on the history as well as
the current time. The positive decidability results can
serve as a unifying framework for devising expressive
and effective time-dependent knowledge representation
formalisms, e.g., temporal description or
spatio-temporal logics.",
}
@InProceedings{Kreutzer:LICS:2002,
author = "Stephan Kreutzer",
title = "Expressive Equivalence of Least and Inflationary
Fixed-Point logic",
crossref = "LICS:2002",
pages = "403--410",
abstract = "We study the relationship between least and
inflationary fixed-point logic. By results of Gurevich
and Shelah from 1986, it has been known that on finite
structures both logics have the same expressive power.
On infinite structures however, the question whether
there is a formula in IFP not equivalent to any
LFP-formula was still open. In this paper, we settle
the question by showing that both logics are equally
expressive on arbitrary structures. The proof will also
establish the strictness of the nesting-depth hierarchy
for IFP on some infinite structures. Finally, we show
that the alternation hierarchy for IFP collapses to the
first level on all structures, i.e. the complement of
an inflationary fixed-point is an inflationary
fixed-point itself.",
}
@InProceedings{DesharnaisGupta:LICS:2002,
author = "Jos{\'e}e Desharnais and Vineet Gupta and Radha
Jagadeesan and Prakash Panangaden",
title = "The Metric Analogue of Weak Bisimulation for
Probabilistic Processes",
crossref = "LICS:2002",
pages = "413--422",
abstract = "We observe that equivalence is not a robust concept in
the presence of numerical information - such as
probabilities - in the model. We develop a metric
analogue of weak bisimulation in the spirit of our
earlier work on metric analogues for strong
bisimulation. We give a fixed point characterization of
the metric. This makes available coinductive reasoning
principles and allows us to prove metric analogues of
the usual algebraic laws for process combinators. We
also show that quantitative properties of interest are
continuous with respect to the metric, which says that
if two processes are close in the metric then
observable quantitative properties of interest are
indeed close. As an important example of this we show
that nearby processes have nearby channel capacities -
a quantitative measure of their propensity to leak
information.",
}
@InProceedings{HirschkoffLozesSangiorgi:LICS:2002,
author = "Daniel Hirschkoff and Etienne Lozes and Davide
Sangiorgi",
title = "Separability, expressiveness, and decidability in the
Ambient Logic",
crossref = "LICS:2002",
pages = "423--432",
abstract = "The Ambient Logic (AL) has been proposed for
expressing properties of process mobility in the
calculus of Mobile Ambients (MA), and as a basis for
query languages on semistructured data. We study some
basic questions concerning the descriptive and
discriminating power of AL, focusing on the equivalence
on processes induced by the logic (=L). We consider MA,
and two Turing complete subsets of it, MA1 and MA2,
respectively defined by imposing a semantic and a
syntactic constraint on process prefixes. The main
contributions include: coinductive and inductive
operational characterisations of =L; an axiomatisation
of =L on MA2; the construction of characteristic
formulas for the processes in MA1 with respect to =L;
the decidability of =L on MA1 and on MA2, and its
undecidability on MA.",
}
@InProceedings{HansenWinskel:LICS:2002,
author = "Mikkel Nygaard and Glynn Winskel",
title = "Linearity in Process Languages",
crossref = "LICS:2002",
pages = "433--444",
abstract = "The meaning and mathematical consequences of linearity
(managing without a presumed ability to copy) are
studied for a path-based model of processes which is
also a model of affine-linear logic. This connection
yields an affine-linear language for processes,
automatically respecting open-map bisimulation, in
which a range of process operations can be expressed.
An operational semantics is provided for the tensor
fragment of the language. Different ways to make
assemblies of processes lead to different choices of
exponential, some of which respect bisimulation.",
}
@InProceedings{Tiwari:LICS:2002,
author = "Ashish Tiwari",
title = "Deciding Confluence of Certain Term Rewriting Systems
in Polynomial Time",
crossref = "LICS:2002",
pages = "447--457",
abstract = "We present a polynomial time algorithm for deciding
confluence of ground term rewrite systems. We
generalize the decision procedure to get a polynomial
time algorithm, assuming that the maximum arity of a
symbol in the signature is a constant, for deciding
confluence of rewrite systems where each rule contains
a shallow linear term on one side and a ground term on
the other. The existence of a polynomial time algorithm
for deciding confluence of ground rewrite systems was
open for a long time and was independently solved only
recently by Comon, Godoy, and Nieuwenhuis [FoCS 2001].
Our decision procedure is based on the concepts of
abstract congruence closure and abstract rewrite
closure, which have been described by Bachmair and
Tiwari [CADE 2000] and Tiwari [FSTTCS 2001],
respectively.",
}
@Proceedings{LICS:2002,
title = "Logic in Computer Science",
booktitle = "Logic in Computer Science",
conference = "17th Annual IEEE Symposium",
key = "LICS",
year = "2002",
month = jul # " 22--25",
venue = "Copenhagen, Denmark",
publisher = "IEEE Computer Society",
address = "Los Alamitos, CA, USA",
ISBN = "0-7695-1483-9",
ISSN = "1043-6871",
}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% NLULP - Natural Language Understanding and Logic Programming
% ====================================================
@InProceedings{AmoiaGardentThater:NLULP:2002,
author = "Marilisa Amoia and Claire Gardent and Stephan Thater",
title = "Using Set Constraints to Generate Distinguishing
Descriptions",
crossref = "NLULP:2002",
pages = "1--15",
abstract = "Algorithms such as (van Deemter 2000) which generate
distinguishing descriptions for sets of individuals
using positive, negative and disjunctive properties, do
not always generate a minimal description. In this
paper, we show that such an approach is cognitively
inappropriate in that the descriptions produced might
be unnecessarily long and ambiguous and/or epistemically
redundant. We then present an alternative,
constraint-based algorithm which does produce minimal
descriptions and compare its performance with the
incremental algorithm.",
}
@InProceedings{CateShan:NLULP:2002,
author = "Balder ten Cate and Chung-chieh Shan",
title = "Question Answering: From Partitions to {P}rolog",
crossref = "NLULP:2002",
pages = "17--31",
abstract = "We implement Groenendijk and Stokhof's partition
semantics of questions in a simple question answering
algorithm. The algorithm is sound, complete, and based
on tableau theorem proving. The algorithm relies on a
syntactic characterization of answerhood: Any answer to
a question is equivalent to some formula built up only
from instances of the question. We prove this
characterization by translating the logic of
interrogation to classical predicate logic and applying
Craig's interpolation theorem.",
}
@InProceedings{Christiansen:NLULP:2002,
author = "Henning Christiansen",
title = "Abductive Language Interpretation as Bottom-up
Deduction",
crossref = "NLULP:2002",
pages = "33--47",
abstract = "A translation of abductive language interpretation
problems into a deductive form is proposed and shown to
be correct. No meta-level overhead is involved in the
resulting formulas that can be evaluated by bottom-up
deduction, e.g., by considering them as Constraint
Handling Rules. The problem statement may involve
background theories with integrity constraints, and
minimal contexts are produced that can explain a
discourse given.",
}
@InProceedings{DanielsMeurers:NLULP:2002,
author = "Mike Daniels and Detmar Meurers",
title = "Improving the Efficiency of Parsing with Discontinuous
Constituents",
crossref = "NLULP:2002",
pages = "49--68",
abstract = "We discuss a generalization of Earley's algorithm to
grammars licensing discontinuous constituents of the
kind proposed by the so-called linearization approaches
in Head-Driven Phrase Structure Grammar. We show that
one can replace the standard indexing on the string
position by bitmasks that act as constraints over
possible coverage bitvectors. This improves efficiency
of edge access and reduces the number of edges by
constraining prediction to those grammar rules which
are compatible with known linearization properties. The
resulting parsing algorithm does not have to process
the right-hand side categories in the order in which
they cover the string, and so one can obtain a
head-driven strategy simply by reordering the
right-hand side categories of the rules. The resulting
strategy generalizes head-driven parsing in that it
also permits the ordering of non-head categories.",
}
@InProceedings{ErkKruijff:NLULP:2002,
author = "Katrin Erk and Geert-Jan M. Kruijff",
title = "A Constraint-programming Approach to Parsing with
Resource-sensitive Categorial Grammar",
crossref = "NLULP:2002",
pages = "69--86",
abstract = "Parsing with resource-sensitive categorial grammars is
an NP-complete problem. The traditional approach to
parsing with such grammars is based on generate and
test and cannot avoid this high worst-case complexity.
This paper proposes an alternative approach, based on
constraint programming: Given a grammar, constraints
formulated on an abstract interpretation of the
grammar's logical structure are used to prune the
search space during parsing. The approach is provably
sound and complete. Calculations of its complexity show
significant potential improvements on efficiency.",
}
@InProceedings{FoxLappinPollard:NLULP:2002,
author = "Chris Fox and Shalom Lappin and Carl Pollard",
title = "First-Order, {C}urry-Typed Logic for Natural Language
Semantics",
crossref = "NLULP:2002",
pages = "87--102",
abstract = "The paper presents Property Theory with Curry Typing
(PTCT) where the language of terms and well-formed
formulae are joined by a language of types. In addition
to supporting fine-grained intensionality, the basic
theory is essentially first-order, so that
implementations using the theory can apply standard
first-order theorem proving techniques. Some extensions
to the type theory are discussed, including the
possibility of adding type polymorphism.",
}
@InProceedings{Gawronska:NLULP:2002,
author = "Barbara Gawronska",
title = "Employing Cognitive Notions in Multilingual
Summarization of News Reports",
crossref = "NLULP:2002",
pages = "103--120",
abstract = "The paper presents an approach to automatic text
understanding inspired by speech act theory and
cognitive semantics, especially by the notion of
`mental spaces' (Fauconnier 1985), and by Pustejovsky's
(1991a, 1991b, 1995) notion of `qualia' and his
definition of formal and telic hyponymy. This approach
is employed in an experimental system for understanding
of news reports and multilingual generation of news
summaries. The system, implemented in Prolog and
Delphi, aims at analyses of English news reports in the
domain of the world's news (military conflicts,
terrorists attacks, natural disasters) and generation
of summaries in Swedish, Danish, and Polish. The paper
focuses on the understanding component and on the
possibility of using WordNet as the main lexical
knowledge resource for English. An appropriate semantic
analysis and a successful summarization of English
input texts require some modifications of the
hyper-/hyponymy and holo-/meronymy relations that are
encoded in WordNet. A combination of a cognitive
analysis of certain lexical and phrasal categories
(speech act phrases, epistemic phrase, prepositions)
with qualia-based re-formulations of WordNet
hierarchies is proposed and tested.",
}
@Proceedings{NLULP:2002,
editor = "Shuly Wintner",
title = "Natural Language Understanding and Logic Programming",
booktitle = "Natural Language Understanding and Logic Programming",
conference = "7th International Workshop",
key = "NLULP",
pages = "vi+120",
year = "2002",
month = jul # " 28",
venue = "Copenhagen, Denmark",
series = "Datalogiske Skrifter",
volume = "92",
institution = "Roskilde University",
address = "Roskilde, Denmark",
ISSN = "0109-9779",
URL = "http://www.cs.haifa.ac.il/~shuly/nlulp02/papers/proc.pdf",
}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PAPM-PROBMIV - Process Algebra and Probabilistic Methods : Performance Mod...
% ==============================================================
@InProceedings{Schiper:PPMV:2002,
author = "Andr{\'e} Schiper",
title = "Failure detection vs. group membership in
fault-tolerant distributed systems: hidden trade-offs",
type = "Invited talk",
crossref = "PAPM-PROBMIV:2002",
pages = "1--15",
abstract = "Failure detection and group membership are two
important components of fault-tolerant distributed
systems. Understanding their role is essential when
developing efficient solutions, not only in
failure-free runs, but also in runs in which processes
do crash. While group membership provides consistent
information about the status of processes in the
system, failure detectors provide inconsistent
information. This paper discusses the trade-offs
related to the use of these two components, and
clarifies their roles using three examples. The first
example shows a case where group membership may
favourably be replaced by a failure detection
mechanism. The second example illustrates a case where
group membership is mandatory. Finally, the third
example shows a case where neither group membership nor
failure detectors are needed (they may be replaced by
\emph{weak ordering} oracles).",
}
@InProceedings{Sands:PPMV:2002,
author = "David Sands",
title = "Probability and Timing: Challenges for Secure
Programming",
type = "Invited talk",
crossref = "PAPM-PROBMIV:2002",
pages = "16",
abstract = "In this talk we will provide an overview of our recent
work on the specification and verification of secure
information flow in such programs. We highlight how
probabilistic considerations enter in two quite
different ways. In the first instance, probabilities
are an additional security threat. Concurrent systems
might exhibit probabilistic behaviour which an attacker
could exploit to leak information. In this case we look
at the modelling and verification of secure information
flow using probabilistic bisimulations. In the second
case, we look at how probabilistic behaviour can come
to our aide when trying to eliminate the information
flows which arise through the \emph{timing behaviour}
of programs. This talk is based on joint work with
Andrei Sabelfeld and Johan Agat.",
}
@InProceedings{AldiniGorrieri:PPMV:2002,
author = "Alessandro Aldini and Roberto Gorrieri",
title = "Security Analysis of a Probabilistic Non-repudiation
Protocol",
crossref = "PAPM-PROBMIV:2002",
pages = "17--36",
abstract = "Non-interference is a definition of security
introduced for the analysis of confidential information
flow in computer systems. In this paper, a
probabilistic notion of non-interference is used to
reveal information leakage which derives from the
probabilistic behavior of systems. In particular, as a
case study, we model and analyze a non-repudiation
protocol which employs a probabilistic algorithm to
achieve a fairness property. The analysis, conducted by
resorting to a definition of probabilistic
non-interference in the context of process algebras,
confirms that a solely nondeterministic approach to the
information flow theory is not enough to study the
security guarantees of cryptographic protocols.",
}
@InProceedings{BohnenkampHaverkort:PPMV:2002,
author = "Henrik Bohnenkamp and Boudewijn Haverkort",
title = "The Mean Value of the Maximum",
crossref = "PAPM-PROBMIV:2002",
pages = "37--56",
abstract = "This paper treats a practical problem that arises in
the area of stochastic process algebras. The problem is
the efficient computation of the mean value of the
maximum of phase-type distributed random variables. The
maximum of phase-type distributed random variables is
again phase-type distributed, however, its
representation grows exponentially in the number of
considered random variables. Although an efficient
representation in terms of Kronecker sums is
straightforward, the computation of the mean value
requires still exponential time, if carried out by
traditional means. In this paper, we describe an
approximation method to compute the mean value in only
polynomial time in the number of considered random
variables and the size of the respective
representations. We discuss complexity, numerical
stability and convergence of the approach.",
}
@InProceedings{DArgenioJeannetJensenLarsen:PPMV:2002,
author = "Pedro D'Argenio and Bertrand Jeannet and Henrik Jensen
and Kim Guldstrand Larsen",
title = "Reduction and Refinement Strategies For Probabilistic
Analysis",
crossref = "PAPM-PROBMIV:2002",
pages = "57--76",
abstract = "We report on new strategies for model checking
quantitative reachability properties of Markov decision
processes by successive refinements. In our approach,
properties are analyzed on abstractions rather than
directly on the given model. Such abstractions are
expected to be significantly smaller than the original
model, and may safely refute or accept the required
property. Otherwise, the abstraction is refined and the
process repeated. As the numerical analysis involved in
settling the validity of the property is more costly
than the refinement process, the method profits from
applying such numerical analysis on smaller state
spaces. The method is significantly enhanced by a
number of novel strategies: a strategy for reducing the
size of the numerical problems to be analyzed by
identification of so-called {\em essential states}, and
heuristic strategies for guiding the refinement
process.",
}
@InProceedings{FecherMajster-CederbaumWu:PPMV:2002,
author = "Harald Fecher and Mila Majster-Cederbaum and Jinzhao
Wu",
title = "Action Refinement for Probabilistic Processes with
True Concurrency Models",
crossref = "PAPM-PROBMIV:2002",
pages = "77--94",
abstract = "In this paper, we develop techniques of action
refinement for probabilistic processes within the
context of a probabilistic process algebra. A semantic
counterpart is carried out in a non-interleaving
causality based setting, probabilistic bundle event
structures. We show that our refinement notion has the
following nice properties: the behaviour of the refined
system can be inferred compositionally from the
behaviour of the original system and from the behaviour
of the systems substituted for actions; the
probabilistic extensions of pomset trace equivalence
and history preserving bisimulation equivalence are
both congruences under the refinement; and with respect
to a cpo-based denotational semantics the syntactic and
semantic refinements coincide with each other up to the
aforementioned equivalence relations when the internal
actions are abstracted away.",
}
@InProceedings{Haar:PPMV:2002,
author = "Stefan Haar",
title = "Probabilistic Unfoldings and Partial Order Fairness in
{P}etri Nets",
crossref = "PAPM-PROBMIV:2002",
pages = "95--114",
abstract = "The article investigates fairness and conspiracy in a
probabilistic framework, based on unfoldings of Petri
nets. Here, the unfolding semantics uses a new,
cluster-based view of local choice. The algorithmic
construction of the unfolding proceeds on two levels,
{\em choice} of steps inside conflict clusters, where
the choice may be fair or unfair, and the {\em policy}
controlling the order in which clusters may act; this
policy may or may not conspire, e.g., against a
transition. In the context of an example where
conspiracy can hide in the partial order behavior of a
live and 1-safe Petri net, we show that, under
non-degenerate i.i.d. randomization on both levels,
both conspiracy and unfair behavior have probability 0.
The probabilistic model, using special Gibbs
potentials, is presented here in the context of 1-safe
nets, but extends to any Petri net.",
}
@InProceedings{Huth:PPMV:2002,
author = "Michael Huth",
title = "Possibilistic and probabilistic abstraction-based
model checking",
crossref = "PAPM-PROBMIV:2002",
pages = "115--134",
abstract = "We present a framework for the specification of
abstract models whose verification results transfer to
the abstracted models for a logic with unrestricted use
of negation and quantification. This framework is novel
in that its models have quantitative or probabilistic
observables and state transitions. Properties of a
quantitative temporal logic have measurable denotations
in these models. For probabilistic models such
denotations approximate the probabilistic semantics of
full LTL. We show how predicate-based abstractions
specify abstract quantitative and probabilistic models
with finite state space.",
}
@InProceedings{KwiatkowskaMehmood:PPMV:2002,
author = "Marta Kwiatkowska and Rashid Mehmood",
title = "Out-of-Core Solution of Large Linear Systems of
Equations arising from Stochastic Modelling",
crossref = "PAPM-PROBMIV:2002",
pages = "135--151",
abstract = "Many physical or computer systems can be modelled as
Markov chains. A range of solution techniques exist to
address the state-space explosion problem, encountered
while analysing such Markov models. However, numerical
solution of these Markov chains is impeded by the need
to store the probability vector(s) explicitly in the
main memory. In this paper, we extend the earlier
out-of-core methods for the numerical solution of large
Markov chains and introduce an algorithm which uses a
disk to hold the probability vector as well as the
matrix. We give experimental results of the
implementation of the algorithm for a Kanban
manufacturing system and a flexible manufacturing
system. Next, we describe how the algorithm can be
modified to exploit sparsity structure of a model,
leading to better performance. We discuss two models, a
cyclic server polling system and a workstation cluster
system, in this context and present results for the
polling models. We also introduce a new sparse matrix
storage format which can provide 30\% or more saving
over conventional schemes.",
}
@InProceedings{KwiatkowskaNormanPacheco:PPMV:2002,
author = "Marta Kwiatkowska and Gethin Norman and Antonio
Pacheco",
title = "Model Checking {CSL} Until Formulae with Random Time
Bounds",
crossref = "PAPM-PROBMIV:2002",
pages = "152--168",
abstract = "Continuous Time Markov Chains (CTMCs) are widely used
as the underlying stochastic process in performance and
dependability analysis. Model checking of CTMCs against
Continuous Stochastic Logic (CSL) has been investigated
previously by a number of authors. CSL contains a
time-bounded until operator that allows one to express
properties such as ``the probability of 3 servers
becoming faulty within $7.01$ seconds is at most 0.1''.
In this paper we extend CSL with a random time-bounded
until operator, where the time bound is given by a
random variable instead of a fixed real-valued time (or
interval). With the help of such an operator we can
state that the probability of reaching a set of goal
states within some generally distributed delay while
passing only through states that satisfy a certain
property is at most (at least) some probability
threshold. In addition, certain transient properties of
systems which contain general distributions can be
expressed with the extended logic. We extend the
efficient model checking of CTMCs against the logic CSL
developed in [Katoen et al. 2001] to cater for the new
operator. Our method involves precomputing a family of
coefficients for a range of random variables which
includes Pareto, uniform and gamma distributions, but
otherwise carries the same computational cost as that
for ordinary time-bounded until. We implement the
algorithms in {\sc Matlab} and evaluate them by means
of a queueing system example.",
}
@InProceedings{KwiatkowskaNormanSproston:PPMV:2002,
author = "Marta Kwiatkowska and Gethin Norman and Jeremy
Sproston",
title = "Probabilistic Model Checking of the {IEEE} 802.11
Wireless Local Area Network Protocol",
crossref = "PAPM-PROBMIV:2002",
pages = "169--187",
abstract = "The international standard IEEE~802.11 was developed
recently in recognition of the increased demand for
wireless local area networks. Its medium access control
mechanism is described according to a variant of the
Carrier Sense Multiple Access with Collision Avoidance
(CSMA/CA) scheme. Although collisions cannot always be
prevented, randomized exponential backoff rules are
used in the retransmission scheme to minimize the
likelihood of repeated collisions. More precisely, the
backoff procedure involves a uniform probabilistic
choice of an integer-valued delay from an interval,
where the size of the interval grows exponentially with
regard to the number of retransmissions of the current
data packet. We model the two-way handshake mechanism
of the IEEE~802.11 standard with a fixed network
topology using probabilistic timed automata, a formal
description mechanism in which both nondeterministic
choice and probabilistic choice can be represented.
From our probabilistic timed automaton model, we obtain
a finite-state Markov decision process via a
property-preserving discrete-time semantics. The Markov
decision process is then verified using {\sc Prism}, a
probabilistic model checking tool, against
probabilistic, timed properties such as ``at most 5,000
microseconds pass before a station sends its packet
correctly.''",
}
@InProceedings{KuntzSiegle:PPMV:2002,
author = "Matthias Kuntz and Markus Siegle",
title = "Deriving Symbolic Representations from Stochastic
Process Algebras",
crossref = "PAPM-PROBMIV:2002",
pages = "188--206",
abstract = "A new denotational semantics for a variant of the
stochastic process algebra TIPP is presented, which
maps process terms to Multi-terminal binary decision
diagrams. It is shown that the new semantics is
Markovian bisimulation equivalent to the standard SOS
semantics. The paper also addresses the difficult
question of keeping the underlying state space minimal
at every construction step.",
}
@InProceedings{Bournez:PPMV:2002,
author = "Olivier Bournez",
title = "A Generalization of Equational Proof Theory?",
type = "Short presentation",
crossref = "PAPM-PROBMIV:2002",
pages = "207--208",
}
@InProceedings{Bravetti:PPMV:2002,
author = "Mario Bravetti",
title = "An Integrated Approach for the Specification and
Analysis of Stochastic Real-Time Systems",
type = "Short presentation",
crossref = "PAPM-PROBMIV:2002",
pages = "209--210",
}
@InProceedings{DiPierroWiklicky:PPMV:2002,
author = "Alessandra Di Pierro and Herbert Wiklicky",
title = "Probabilistic Abstract Interpretation and Statistical
Testing",
type = "Short presentation",
crossref = "PAPM-PROBMIV:2002",
pages = "211--212",
}
@InProceedings{LassaignePeyronnet:PPMV:2002,
author = "Richard Lassaigne and Sylvain Peyronnet",
title = "Approximate verification of probabilistic systems",
type = "Short presentation",
crossref = "PAPM-PROBMIV:2002",
pages = "213--214",
}
@Proceedings{PAPM-PROBMIV:2002,
editor = "Holger Hermanns and Roberto Segala",
title = "Process Algebra and Probabilistic Methods :
Performance Modeling and Verification",
booktitle = "Process Algebra and Probabilistic Methods :
Performance Modeling and Verification",
conference = "2nd Joint International Workshop",
key = "PAPM-PROBMIV",
year = "2002",
month = jul # " 25--26",
venue = "Copenhagen, Denmark",
series = "Lecture Notes in Computer Science",
volume = "2399",
publisher = "Springer-Verlag",
ISSN = "0302-9743",
}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PCL - Paraconsistent Computational Logic
% ==================================
@InProceedings{BesnardSchaubTompitsWoltran:PCL:2002,
author = "Philippe Besnard and Torsten Schaub and Hans Tompits
and Stefan Woltran",
title = "Paraconsistent Reasoning via Quantified Boolean
Formulas, {I}: {A}xiomatising Signed Systems",
crossref = "PCL:2002",
pages = "1--15",
abstract = "Signed systems were introduced as a general,
syntax-independent framework for paraconsistent
reasoning, that is, non-trivialised reasoning from
inconsistent information. In this paper, we show how
the family of corresponding paraconsistent consequence
relations can be axiomatised by means of quantified
Boolean formulas. This approach has several benefits.
First, it furnishes an axiomatic specification of
paraconsistent reasoning within the framework of signed
systems. Second, this axiomatisation allows us to
identify upper bounds for the complexity of the
different signed consequence relations. We strengthen
these upper bounds by providing strict complexity
results for the considered reasoning tasks. Finally, we
obtain an implementation of different forms of
paraconsistent reasoning by appeal to the existing
system QUIP.",
}
@InProceedings{Zhang:PCL:2002,
author = "Guo-Qiang Zhang",
title = "Axiomatic Aspects of Default Inference",
crossref = "PCL:2002",
pages = "17--32",
abstract = "Properties of classical (logical) entailment relation
have been well studied and well-understood, either with
or without the presence of logical connectives. There
is, however, less uniform agreement on laws for the
nonmonotonic consequence relation. This paper studies
axioms for nonmonotonic consequences from a
semantics-based point of view, focusing on a class of
mathematical structures for reasoning about partial
information without a predefined syntax/logic. This
structure is called a default structure. We study
axioms for the nonmonotonic consequence relation
derived from extensions as in Reiter's default logic,
using skeptical reasoning, but extensions are now used
for the construction of possible worlds in a default
information structure. In previous work we showed that
skeptical reasoning arising from default-extensions
obeys a well-behaved set of axioms including the axiom
of cautious cut. We show here that, remarkably, the
converse is also true: any consequence relation obeying
this set of axioms can be represented as one
constructed from skeptical reasoning. We provide
representation theorems to relate axioms for
nonmonotonic consequence relation and properties about
extensions, and provide a one-to-one correspondence
between nonmonotonic systems which satisfies the law of
cautious monotony and default structures with unique
extensions. Our results give a theoretical
justification for a set of basic rules governing the
update of nonmonotonic knowledge bases, demonstrating
the derivation of them from the more concrete and
primitive construction of extensions. It is also
striking to note that proofs of the representation
theorems show that only shallow extensions are
necessary, in the sense that the number of iterations
needed to achieve an extension is at most three. All of
these developments are made possible by taking a more
liberal view of consistency: consistency is a user
defined predicate, satisfying some basic properties.",
}
@InProceedings{Villadsen:PCL:2002,
author = "J{\o}rgen Villadsen",
title = "A Paraconsistent Higher Order Logic",
crossref = "PCL:2002",
pages = "33--49",
abstract = "Classical logic predicts that everything (thus nothing
useful at all) follows from inconsistency. A
paraconsistent logic is a logic where an inconsistency
does not lead to such an explosion, and since in
practice consistency is difficult to achieve there are
many potential applications of paraconsistent logics in
knowledge-based systems, logical semantics of natural
language, etc. Higher order logics have the advantages
of being expressive and with several automated theorem
provers available. Also the type system can be helpful.
We present a concise description of a paraconsistent
higher order logic with countable infinite
indeterminacy, where each basic formula can get its own
indeterminate truth value (or as we prefer: truth
code). The meaning of the logical operators is new and
rather different from traditional many-valued logics as
well as from logics based on bilattices. The adequacy
of the logic is examined by a case study in the domain
of medicine. Thus we try to build a bridge between the
HOL and MVL communities. A sequent calculus is proposed
based on recent work by Muskens.",
}
@InProceedings{ArieliDeneckerNuffelenBruynooghe:PCL:2002,
author = "Ofer Arieli and Marc Denecker and Bert Van Nuffelen
and Maurice Bruynooghe",
title = "Repairing Inconsistent Databases: {A} Model-Theoretic
Approach and Abductive Reasoning",
crossref = "PCL:2002",
pages = "51--65",
abstract = "In this paper we consider two points of views to the
problem of coherent integration of distributed data.
First we give a pure model-theoretic analysis of the
possible ways to {"}repair{"} a database. We do so by
characterizing the possibilities to {"}recover{"}
consistent data from an inconsistent database in terms
of those models of the database that exhibit as minimal
inconsistent information as reasonably possible. Then
we introduce an abductive application to restore the
consistency of a given database. This application is
based on an abductive solver (A-system) that implements
an SLDNFA-resolution procedure, and computes a list of
data-facts that should be inserted to the database or
retracted from it in order to keep the database
consistent. The two approaches for coherent data
integration are related by soundness and completeness
results.",
}
@InProceedings{Maher:PCL:2002,
author = "Michael Maher",
title = "A Model-Theoretic Semantics for Defeasible Logic",
crossref = "PCL:2002",
pages = "67--80",
abstract = "Defeasible logic is an efficient logic for defeasible
reasoning. It is defined through a proof theory and,
until now, has had no model theory. In this paper a
model-theoretic semantics is given for defeasible
logic. The logic is sound and complete with respect to
the semantics. We also briefly outline how this
approach extends to a wide range of defeasible
logics.",
}
@InProceedings{MaluszynskiVitoria:PCL:2002,
author = "Jan Ma{\l}uszy{\'n}ski and Aida Vit{\'o}ria",
title = "Defining Rough Sets by Extended Logic Programs",
crossref = "PCL:2002",
pages = "81--90",
abstract = "We show how definite extended logic programs can be
used for defining and reasoning with rough sets. Moreover,
a rough-set-specific query language is presented and an
answering algorithm is outlined. Thus, we not only show
a possible application of a paraconsistent logic to the
field of rough sets but also establish a link between
rough set theory and logic programming, making possible
transfer of expertise between both fields.",
}
@InProceedings{Batens:PCL:2002,
author = "Diderik Batens",
title = "On a Partial Decision Method for Dynamic Proofs",
type = "Invited talk",
crossref = "PCL:2002",
pages = "91--108",
abstract = "This paper concerns a goal directed proof procedure
for the propositional fragment of the adaptive logic
ACLuN1. At the propositional level, it forms an
algorithm for final derivability. If extended to the
predicative level, it provides a criterion for final
derivability. This is essential in view of the absence
of a positive test. The procedure may be generalized to
all flat adaptive logics.",
}
@InProceedings{GoldinWegner:PCL:2002,
author = "Dina Goldin and Peter Wegner",
title = "Paraconsistency of Interactive Computation",
crossref = "PCL:2002",
pages = "109--118",
abstract = "The goal of computational logic is to allow us to
model computation as well as to reason about it. We
argue that a computational logic must be able to model
interactive computation. We show that first-order logic
cannot model interactive computation due to the
incompleteness of interaction. We show that interactive
computation is necessarily paraconsistent, able to
model both a fact and its negation, due to the role of
the world (environment) in determining the course of
the computation. We conclude that paraconsistency is a
necessary property for a logic that can model
interactive computation.",
}
@InProceedings{Bry:PCL:2002,
author = "Fran{\c c}ois Bry",
title = "An Almost Classical Logic for Logic Programming and
Nonmonotonic Reasoning",
crossref = "PCL:2002",
pages = "119--134",
abstract = "The model theory of a first-order logic called N4 is
introduced. N4 does not eliminate double negations, as
classical logic does, but instead reduces fourfold
negations. N4 is very close to classical logic: N4 has
two truth values; implications are, in N4 like in
classical logic, material; and negation distributes
over compound formulas in N4 as it does in classical
logic. Results suggest that the semantics of normal
logic programs is conveniently formalized in N4:
Classical logic Herbrand interpretations generalize
straightforwardly to N4; the classical minimal Herbrand
model of a positive logic program coincides with its
unique minimal N4 Herbrand model; the stable models of
a normal logic program and its so-called complete
minimal N4 Herbrand models coincide.",
}
@Proceedings{PCL:2002,
editor = "Hendrik Decker and J{\o}rgen Villadsen and Toshiharu
Waragai",
title = "Paraconsistent Computational Logic",
booktitle = "Paraconsistent Computational Logic",
key = "PCL",
pages = "viii+134",
year = "2002",
month = jul # " 27",
venue = "Copenhagen, Denmark",
series = "Datalogiske Skrifter",
volume = "95",
institution = "Roskilde University",
address = "Roskilde, Denmark",
ISSN = "0109-9779",
abstract = "This proceedings volume contains the papers presented
at the ICLP 2002 workshop Paraconsistent Computational
Logic, on July 27, in Copenhagen, Denmark, as part of
the Federated Logic Conference (FLoC).",
}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PaPS - Problems and Problem Sets for {ATP}
% ===================================
@InProceedings{Harrison:FLoC:2002:RADM+*PaPS,
author = "John Harrison",
title = "Extracting Test Problems from Real Applications",
type = "Invited talk",
crossref = "PaPS:2002",
pages = "(unpaginated)",
note = "PaPS proceedings contains only the abstract",
abstract = "{The HOL Light theorem prover has a number of
automated subsystems, e.g., a model elimination
procedure for first order logic with equality, and
arithmetic provers for linear and non-linear
arithmetic. The sub-problems that are dealt with by
these components can easily be extracted to give a good
selection of the relatively easily decidable problems
that arise in ``real'' applications, such as
formalizing mathematics and performing industrial
verifications. These can then be used as test problems
for other automated provers, and possibly incorporated
into standard test suites such as TPTP. We have already
made available some test problems generated in this
way. \\
This simple approach has the disadvantage that the
problems tend to be relatively easy, and self-selected
for the particular methods used in HOL's own provers.
However, since HOL is an LCF-style prover, it is
essentially trivial to capture all proofs in the
system, regardless of whether they are wholly or partly
automated. Using this technique, we can generate
realistic test problems of essentially arbitrary
difficulty.}",
}
@InProceedings{Zimmer:PaPS:2002,
author = "J{\"u}rgen Zimmer and Andreas Franke and Simon Colton
and Geoff Sutcliffe",
title = "Integrating {HR} and {tptp2X} into {MathWeb} to
Compare Automated Theorem Provers",
crossref = "PaPS:2002",
pages = "(unpaginated)",
abstract = "{The assessment and comparison of automated theorem
proving systems (ATPs) is important for the advancement
of the field. At present, the de facto assessment
method is to test provers on the TPTP library of nearly
6000 theorems. We describe here a project which aims to
complement the TPTP service by automatically generating
theorems of sufficient difficulty to provide a
significant test for first order provers. This has been
achieved by integrating the HR automated theory
formation program into the MathWeb Software Bus. HR
generates first order conjectures in TPTP format and
passes them to a concurrent ATP service in MathWeb.
MathWeb then uses the tptp2X utility to translate the
conjectures into the input format of a set of provers.
In this way, various ATP systems can be compared on
their performance over sets of thousands of theorems
they have not been previously exposed to. Our purpose
here is to describe the integration of various new
programs into the MathWeb architecture, rather than to
present a full analysis of the performance of theorem
provers. However, to demonstrate the potential of the
combination of the systems, we describe some
preliminary results from experiments in group
theory.}",
}
@InProceedings{Hsiang:PaPS:2002,
author =	"Jieh Hsiang and Yuh Pyng Shieh and YaoChiang Chen",
title =	"The Cyclic Complete Mappings Counting Problems",
crossref = "PaPS:2002",
pages = "(unpaginated)",
abstract = "{A \emph{complete mapping} of a group $(G,+)$ is a
permutation $f(x)$ of $G$ with $f(0)=0$ such that
$-x+f(x)$ is also a permutation of $G$. Given a group
$G$, the \emph{Complete Mappings Counting Problem} is
to find, if any, the number of complete mappings of
$G$. Complete mapping problems are ideal for testing
the strength of propositional solvers. In this paper we
describe various types of complete mapping problems,
and their relationship with variations of the $n$-queen
problems. We also present several forms of {\em
symmetry operators} which, in addition to being
theoretically interesting on their own, are crucial for
improving the efficiency of the provers. Several
classes of challenge problems for propositional provers
are given, so are the transformations of these problems
into propositional format.}",
}
@InProceedings{Slaney:PaPS:2002,
author = "John Slaney",
title = "A Benchmark Template for Substructural Logics",
crossref = "PaPS:2002",
pages = "(unpaginated)",
abstract = "{This paper suggests a benchmark template for
non-classical propositional reasoning systems, based on
work done some twenty years ago in the investigation of
relevant logic. The idea is simple: generate
non-equivalent binary operations in the language of the
logic and use an automated reasoning system to decide
which ones satisfy given algebraic properties. Of
course, problem classes generated in this way are not
in any sense uniformly distributed: indeed, they are
highly structured and have special features such as a
low ratio of variables to length. Nonetheless, they
have the character of theorem proving ``in the field'',
and should be part of the evaluation equipment for
systems dealing with a wide range of nonclassical
logics.}",
}
@InProceedings{Ernst:PaPS:2002,
author = "Zac Ernst and others",
title = "More First-order Test Problems in Math and Logic",
crossref = "PaPS:2002",
pages = "(unpaginated)",
abstract = "{This paper contains a collection of theorems,
nontheorems, and conjectures in first-order and
equational logic. These problems arose in our work on
applications of automated deduction to mathematics and
logic. Some originated in our work, and others were
sent to us as challenge problems or open questions.}",
}
@InProceedings{Claessen:PaPS:2002,
author = "Koen Claessen and Reiner H{\"a}hnle and Johan
M{\aa}rtensson",
title = "Verification of Hardware Systems with First-Order
Logic",
crossref = "PaPS:2002",
pages = "(unpaginated)",
abstract = "{The state of the art of automatic first order logic
theorem provers is advanced enough to be useful in a
commercial context. This paper describes a way in which
first order logic and theorem provers are used at the
Swedish formal verification company Safelogic, to
formally verify properties of hardware systems. Two
different verification methods are discussed, which both
make use of translations of formalisms into first order
logic. We draw some preliminary conclusions from our
experiences and provide problems sets and
benchmarks.}",
}
@Proceedings{PaPS:2002,
editor = "Geoff Sutcliffe and Jeff Pelletier and Christian
Suttner",
title = "Problems and Problem Sets for {ATP}",
booktitle = "Problems and Problem Sets for {ATP}",
conference = "Workshop",
key = "PaPS",
year = "2002",
month =	jul # " 31--" # aug # " 1",
venue = "Copenhagen, Denmark",
series = "DIKU technical reports",
volume = "02-10",
institution = "University of Copenhagen, Dept.~of Computer Science",
ISSN = "0107-8283",
}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RADM - Role of Automated Deduction in Mathematics
% ==========================================
@InProceedings{Cairns:RADM:2002,
author = "Paul Cairns and Jeremy Gow",
title = "Automated deduction systems for real mathematicians",
crossref = "RADM:2002",
pages = "1--10",
}
@InProceedings{Chen:RADM:2002,
author = "Chen Lingjun and Hidetsune Kobayashi and Hirokazu
Murao and Hideo Suzuki",
title = "Notes on formalizing induction on the number of sets",
crossref = "RADM:2002",
pages = "11--23",
}
@InProceedings{Colton:RADM:2002,
author = "Simon Colton and Roy McCasland and Alan Bundy and Toby
Walsh",
title = "Automated theory formation for tutoring tasks in pure
mathematics",
crossref = "RADM:2002",
pages = "25--42",
}
@InProceedings{Kerber:RADM:2002,
author = "Manfred Kerber and Martin Pollet",
title = "On the design of mathematical concepts",
crossref = "RADM:2002",
pages = "43--59",
}
@InProceedings{Schwarzweller:RADM:2002,
author = "Christoph Schwarzweller",
title = "Symbolic deduction in mathematical databases based on
properties",
crossref = "RADM:2002",
pages = "61--68",
}
@Proceedings{RADM:2002,
editor = "Simon Colton and Volker Sorge",
title = "Role of Automated Deduction in Mathematics",
booktitle = "Role of Automated Deduction in Mathematics",
conference = "Second Workshop",
key = "RADM",
pages = "vi+68",
year = "2002",
month = jul # " 31",
venue = "Copenhagen, Denmark",
series = "DIKU technical reports",
volume = "02-09",
institution = "University of Copenhagen, Dept.~of Computer Science",
ISSN = "0107-8283",
}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RTA - Rewriting Techniques and Applications
% =====================================
@InProceedings{ShankarRuess:RTA:2002,
author = "Natarajan Shankar and Harald Rue{\ss}",
title = "Combining {Shostak} Theories",
type = "Unscheduled",
crossref = "RTA:2002",
pages = "1--18",
abstract = "Ground decision procedures for combinations of
theories are used in many systems for automated
deduction. There are two basic paradigms for combining
decision procedures. The Nelson--Oppen method combines
decision procedures for disjoint theories by exchanging
equality information on the shared variables. In
Shostak's method, the combination of the theory of pure
equality with canonizable and solvable theories is
decided through an extension of congruence closure that
yields a canonizer for the combined theory. Shostak's
original presentation, and others that followed it,
contained serious errors which were corrected for the
basic procedure by the present authors. Shostak also
claimed that it was possible to combine canonizers and
solvers for disjoint theories. This claim is easily
verifiable for canonizers, but is unsubstantiated for
the case of solvers. We show how our earlier procedure
can be extended to combine multiple disjoint
canonizable, solvable theories within the Shostak
framework.",
}
@InProceedings{Mitchell:RTA:2002,
author = "John C. Mitchell",
title = "Multiset Rewriting and Security Protocol Analysis",
type = "Invited talk",
crossref = "RTA:2002",
pages = "19--22",
abstract = "The Dolev-Yao model of security protocol analysis may
be formalized using a notation based on multi-set
rewriting with existential quantification. This
presentation describes the multiset rewriting approach
to security protocol analysis, algorithmic upper and
lower bounds on specific forms of protocol analysis,
and some of the ways this model is useful for
formalizing subtle properties of specific protocols.",
}
@InProceedings{Baader:RTA:2002,
author = "Franz Baader",
title = "Engineering of Logics for the Content-based
Representation of Information",
type = "Invited talk",
crossref = "RTA:2002",
pages = "23",
abstract = "Storage and transfer of information as well as
interfaces for accessing this information have
undergone a remarkable evolution. Nevertheless,
information systems are still not `intelligent' in the
sense that they `understand' the information they
store, manipulate, and present to their users. A case
in point is the world wide web and search engines
allowing to access the vast amount of information
available there. Web-pages are mostly written for human
consumption and the mark-up provides only rendering
information for textual and graphical information.
Search engines are usually based on keyword search and
often provide a huge number of answers, many of which
are completely irrelevant, whereas some of the more
interesting answers are not found. In contrast, the
vision of a `semantic web' aims for
machine-understandable web resources, whose content can
then be comprehended and processed both by automated
tools, such as search engines, and by human users. \\
The content-based representation of information
requires representation formalisms with a well-defined
formal semantics since otherwise there cannot be a
common understanding of the represented information.
This semantics can elegantly be provided by a
translation into an appropriate logic or the use of a
logic-based formalism in the first place. This logical
approach has the additional advantage that logical
inferences can then be used to reason about the
represented information, thus detecting inconsistencies
and computing implicit information. However, in this
setting there is a fundamental tradeoff between the
expressivity of the representation formalism on the one
hand, and the efficiency of reasoning with this
formalism on the other hand. \\
This motivates the `engineering of logics', i.e., the
design of logical formalisms that are tailored to
specific representation tasks. This also encompasses
the formal investigation of the relevant inference
problems, the development of appropriate inferences
procedures, and their implementation, optimization, and
empirical evaluation. Another important topic in this
context is the combination of logics and their
inference procedures since a given application may
require the use of more than one specialized logic. The
talk will illustrate this approach with the example of
so-called Description Logics and their application as
ontology languages for the semantic web.",
}
@InProceedings{Mellies:RTA:2002,
author = "Paul-Andr{\'e} Melli{\`e}s",
title = "Axiomatic Rewriting Theory {VI}: Residual Theory
Revisited",
crossref = "RTA:2002",
pages = "24--50",
abstract = "Residual theory is the algebraic theory of confluence
for the $\lambda$-calculus, and more generally
\emph{conflict-free} rewriting systems (without
critical pairs). The theory took its modern shape in
L{\'e}vy's PhD thesis, after Church, Rosser and Curry's
seminal steps. There, L{\'e}vy introduces a
\emph{permutation equivalence} between rewriting paths,
and establishes that among all confluence diagrams
$P\longrightarrow N\longleftarrow Q$ completing a span
$P\longleftarrow M\longrightarrow Q$, there exists a
\emph{minimum} such one, modulo permutation
equivalence. Categorically, the diagram is called a
\emph{pushout.} \\
In this article, we extend L{\'e}vy's residual theory,
in order to enscope ``border-line'' rewriting systems,
which admit critical pairs but enjoy a strong
Church-Rosser property (existence of pushouts.) Typical
examples are the \emph{associativity rule} and the
\emph{positive braid} rewriting systems. Finally, we
show that the resulting theory reformulates and
clarifies L{\'e}vy's optimality theory for the
$\lambda$-calculus, and its so-called ``extraction
procedure''.",
}
@InProceedings{KennawayKhasidashvilPiperno:RTA:2002,
author = "Richard Kennaway and Zurab Khasidashvili and Adolfo
Piperno",
title = "Static Analysis of Modularity of beta-reduction in the
Hyperbalanced lambda-calculus",
crossref = "RTA:2002",
pages = "51--65",
abstract = "We investigate the degree of \emph{parallelism (or
modularity)} in the \emph{hyperbalanced
$\lambda$-calculus}, $\lambda_H$, a subcalculus of
$\lambda$-calculus containing all simply typable terms
(up to a restricted $\eta$-expansion). In technical
terms, we study the family relation on redexes in
$\lambda_H$, and the contribution relation on
redex-families, and show that the latter is a forest
(as a partial order). This means that hyperbalanced
$\lambda$-terms allow for maximal possible parallelism
in computation. To prove our results, we use and
further refine, for the case of hyperbalanced terms,
some well known results concerning \emph{paths}, which
allow for static analysis of many fundamental
properties of $\beta$-reduction.",
}
@InProceedings{FaureKirchner:RTA:2002,
author = "Germain Faure and Claude Kirchner",
title = "Exceptions in the Rewriting Calculus",
crossref = "RTA:2002",
pages = "66--82",
abstract = "In the context of the rewriting calculus, we introduce
and study an exception mechanism that allows us to
express in a simple way rewriting strategies and that
is therefore also useful for expressing theorem proving
tactics. The proposed exception mechanism is expressed
in a confluent calculus which gives the ability to
simply express the semantics of the FIRST tactical and
to describe in full details the expression of
conditional rewriting.",
}
@InProceedings{Struth:RTA:2002,
author = "Georg Struth",
title = "Deriving Focused Lattice Calculi",
crossref = "RTA:2002",
pages = "83--97",
abstract = "We derive rewrite-based ordered resolution calculi for
semilattices, distributive lattices and boolean
lattices. Using ordered resolution as a metaprocedure,
theory axioms are first transformed into independent
bases. Focused inference rules are then extracted from
inference patterns in refutations. The derivation is
guided by mathematical and procedural background
knowledge, in particular by ordered chaining calculi
for quasiorderings (forgetting the lattice structure),
by ordered resolution (forgetting the clause structure)
and by Knuth-Bendix completion for non-symmetric
transitive relations (forgetting both structures).
Conversely, all three calculi are derived and proven
complete in a transparent and generic way as special
cases of the lattice calculi.",
}
@InProceedings{SekiEA:RTA:2002,
author = "Hiroyuki Seki and Toshinori Takai and Youhei Fujinaka
and Yuichi Kaji",
title = "Layered Transducing Term Rewriting System and Its
Recognizability Preserving Property",
crossref = "RTA:2002",
pages = "98--113",
abstract = "A term rewriting system which effectively preserves
recognizability (EPR-TRS) has good mathematical
properties. In this paper, a new subclass of TRSs,
layered transducing TRSs (LT-TRSs) is defined and its
recognizability preserving property is discussed. The
class of LT-TRSs contains some EPR-TRSs, e.g.,
$\{f(x)\to f(g(x))\}$ which do not belong to any of the
known decidable subclasses of EPR-TRSs. Bottom-up
linear tree transducer, which is a well-known
computation model in the tree language theory, is a
special case of LT-TRS. We present a sufficient
condition for an LT-TRS to be an EPR-TRS. Also some
properties of LT-TRSs including reachability are shown
to be decidable.",
}
@InProceedings{OhsakiTakai:RTA:2002,
author = "Hitoshi Ohsaki and Toshinori Takai",
title = "Decidability and closure properties for computation on
equational tree languages",
crossref = "RTA:2002",
pages = "114--128",
abstract = "Equational tree automata provide a powerful tree
language framework that facilitates to recognize
congruence closures of tree languages. In the paper we
show the emptiness problem for AC-tree automata and the
intersection-emptiness problem for regular AC-tree
automata, each of which was open in our previous work,
are decidable, by a straightforward reduction to the
reachability problem for ground AC-term rewriting. The
newly obtained results generalize decidability of
so-called \emph{reachable property problem} of Mayr and
Rusinowitch. We then discuss complexity issue of
AC-tree automata. Moreover, in order to solve some
other questions about regular A- and AC-tree automata,
we recall the basic connection between word languages
and tree languages.",
}
@InProceedings{RetyVuotto:RTA:2002,
author = "Pierre R{\'e}ty and Julie Vuotto",
title = "Regular Sets of Descendants by Some Rewrite
Strategies",
crossref = "RTA:2002",
pages = "129--143",
abstract = "For a constructor-based rewrite system $R$, a regular
set of ground terms $E$, and assuming some additional
restrictions, we build a finite tree automaton that
recognizes the descendants of $E$, i.e.~the terms
issued from $E$ by rewriting, according to innermost,
innermost-leftmost, and outermost strategies.",
}
@InProceedings{Waldmann:RTA:2002,
author = "Johannes Waldmann",
title = "Rewrite Games",
crossref = "RTA:2002",
pages = "144--158",
abstract = "In this note, we propose some challenging problems
related to certain rewrite games. In particular, we
re-formulate an open problem from combinatorial game
theory (do all finite octal games have an ultimately
periodic Sprague-Grundy sequence?) as a question about
rationality of some tree languages. \\
We propose to attack this question by methods from set
constraint systems, and show some cases where this
works directly. \\
Finally we present rewrite games from combinatory
logic, and their relation to algebraic tree
languages.",
}
@InProceedings{SeveriDeVries:RTA:2002,
author = "Paula Severi and Fer-Jan de Vries",
title = "An extensional {B}{\"o}hm model",
crossref = "RTA:2002",
pages = "159--173",
abstract = "We show the existence of an infinitary confluent and
normalising extension of the finite extensional lambda
calculus with beta and eta. Besides infinite beta
reductions also infinite eta reductions are possible in
this extension, and terms without head normal form can
be reduced to bottom. As corollaries we obtain a
simple, syntax based construction of an extensional
B{\"o}hm model of the finite lambda calculus; and a
simple, syntax based proof that two lambda terms have
the same semantics in this model if and only if they
have the same eta-B{\"o}hm tree if and only if they are
observationally equivalent w.r.t.~beta normal forms.
\\
The confluence proof reduces confluence of beta, bottom
and eta via infinitary commutation and postponement
arguments to confluence of beta and bottom and
confluence of eta. \\
We give counterexamples against confluence of similar
extensions based on the identification of the terms
without weak head normal form and the terms without top
normal form (rootactive terms) respectively.",
}
@InProceedings{Forest:RTA:2002,
author = "Julien Forest",
title = "A Weak Calculus with Explicit Operators for Pattern
Matching and Substitution",
crossref = "RTA:2002",
pages = "174--191",
abstract = "In this paper we propose a \textbf{Weak Lambda
Calculus} having explicit operators for \textbf{Pattern
Matching} and \textbf{Substitution}. This formalism is
able to specify functions defined by cases via pattern
matching constructors as done by most modern functional
programming languages such as OCAML. We show the main
properties enjoyed by this lambda calculus, namely
subject reduction, confluence and strong
normalization.",
}
@InProceedings{LiangNadathur:RTA:2002,
author = "Chuck Liang and Gopalan Nadathur",
title = "Tradeoffs in the Intensional Representation of Lambda
Terms",
type = "Application Paper",
crossref = "RTA:2002",
pages = "192--206",
abstract = "Higher-order representations of objects such as
programs, specifications and proofs are important to
many metaprogramming and symbolic computation tasks.
Systems that support such representations often depend
on the implementation of an intensional view of the
terms of suitable typed lambda calculi. Refined lambda
calculus notations have been proposed that can be used
in realizing such implementations. There are, however,
choices in the actual deployment of such notations
whose practical consequences are not well understood.
Towards addressing this lacuna, the impact of three
specific ideas is examined: the de Bruijn
representation of bound variables, the explicit
encoding of substitutions in terms and the annotation
of terms to indicate their independence on external
abstractions. Qualitative assessments are complemented
by experiments over actual computations. The empirical
study is based on $\lambda$Prolog programs executed
using suitable variants of a low level, abstract
machine based implementation of this language.",
}
@InProceedings{DeharbeEA:RTA:2002,
author = "David D{\'e}harbe and Anamaria Martins Moreira and
Christophe Ringeissen",
title = "Improving Symbolic Model Checking by Rewriting
Temporal Logic Formulae",
crossref = "RTA:2002",
pages = "207--221",
abstract = "A factor in the complexity of conventional algorithms
for model checking Computation Tree Logic (CTL) is the
size of the formulae, and, more precisely, the number
of fixpoint operators. This paper addresses the
following questions: given a CTL formula $f$, is there
an equivalent formula with fewer fixpoint operators?
and how term rewriting techniques may be used to find
it? Moreover, for some sublogics of CTL, e.g. the
sub-logic NFCTL (no fixpoint computation tree logic),
more efficient verification procedures are available.
This paper also addresses the problem of testing
whether an expression belongs or not to NFCTL, and
providing support in the choice of the most efficient
amongst different available verification algorithms. In
this direction, we propose a rewrite system modulo
$AC$, and discuss its implementation in Elan, showing
how this rewriting process can be plugged in a formal
verification tool.",
}
@InProceedings{Voigtlaender:RTA:2002,
author = "Janis Voigtl{\"a}nder",
title = "Conditions for Efficiency Improvement by Tree
Transducer Composition",
crossref = "RTA:2002",
pages = "222--236",
abstract = "We study the question of efficiency improvement or
deterioration for a semantic-preserving program
transformation technique based on macro tree transducer
composition. By annotating functional programs to
reflect the internal property `computation time'
explicitly in the computed output, and by manipulating
such annotations, we formally prove syntactic
conditions under which the composed program is
guaranteed to be more efficient than the original
program, with respect to call-by-need reduction to
normal form. The developed criteria can be checked
automatically, and thus are suitable for integration
into an optimizing functional compiler.",
}
@InProceedings{BravenboerVisser:RTA:2002,
author = "Martin Bravenboer and Eelco Visser",
title = "Rewriting Strategies for Instruction Selection",
type = "Application Paper",
crossref = "RTA:2002",
pages = "237--251",
abstract = "Instruction selection (mapping IR trees to machine
instructions) can be expressed by means of rewrite
rules. Typically, such sets of rewrite rules are highly
ambiguous. Therefore, standard rewriting engines based
on fixed, exhaustive strategies are not appropriate for
the execution of instruction selection. Code generator
generators use special purpose implementations
employing dynamic programming. In this paper we show
how rewriting strategies for instruction selection can
be encoded concisely in Stratego, a language for
program transformation based on the paradigm of
programmable rewriting strategies. This embedding
obviates the need for a language dedicated to code
generation, and makes it easy to combine code
generation with other optimizations.",
}
@InProceedings{KirchnerBournez:RTA:2002,
author = "Olivier Bournez and Claude Kirchner",
title = "Probabilistic rewrite strategies: Applications to
{ELAN}",
crossref = "RTA:2002",
pages = "252--266",
abstract = "Recently rule based languages focussed on the use of
rewriting as a modeling tool which results in making
specifications executable. To extend the modeling
capabilities of rule based languages, we explore the
possibility of making the rule applications subject to
probabilistic choices. \\
We propose an extension of the Elan strategy language
to deal with randomized systems. We argue through
several examples that we propose indeed a natural
setting to model systems with randomized choices. This
leads us to interesting new problems, and we address
the generalization of the usual concepts in abstract
reduction systems to randomized systems.",
}
@InProceedings{Geser:RTA:2002,
author = "Alfons Geser",
title = "Loops of Superexponential Lengths in One-Rule String
Rewriting",
crossref = "RTA:2002",
pages = "267--280",
abstract = "Loops are the most frequent cause of non-termination
in string rewriting. In the general case,
non-terminating, non-looping string rewriting systems
exist, and the uniform termination problem is
undecidable. For rewriting with only one string
rewriting rule, it is unknown whether non-terminating,
non-looping systems exist and whether uniform
termination is decidable. If in the one-rule case,
non-termination is equivalent to the existence of
loops, as McNaughton conjectures, then a decision
procedure for the existence of loops also solves the
uniform termination problem. As the existence of loops
of bounded lengths is decidable, the question is raised
how long shortest loops may be. We show that string
rewriting rules exist whose shortest loops have
superexponential lengths in the size of the rule.",
}
@InProceedings{TahhanBittar:RTA:2002,
author = "Elias Tahhan-Bittar",
title = "Recursive Derivational Length Bounds for Confluent
Term Rewrite Systems",
crossref = "RTA:2002",
pages = "281--295",
abstract = "Let $F$ be a signature and $\mathcal{R}$ a term
rewrite system on ground terms of $F$. We define the
concepts of a context-free potential redex in a term
and of bounded confluent terms. We bound recursively
the lengths of derivations of a bounded confluent term
$t$ by a function of the length of derivations of
context-free potential redexes of this term. We define
the concept of inner redex and we apply the recursive
bounds that we obtained to prove that, whenever
$\mathcal{R}$ is a confluent overlay term rewrite
system, the derivational length bound for arbitrary
terms is an iteration of the derivational length bound
for inner redexes.",
}
@InProceedings{Lucas:RTA:2002,
author = "Salvador Lucas",
title = "Termination of (Canonical) Context-Sensitive
Rewriting",
crossref = "RTA:2002",
pages = "296--310",
abstract = "Context-sensitive rewriting (csr) is a restriction of
rewriting which forbids reductions on selected
arguments of functions. A {\em replacement map}
discriminates, for each symbol of the signature, the
argument positions on which replacements are allowed.
If the replacement restrictions are less restrictive
than those expressed by the so-called {\em canonical}
replacement map, then csr can be used for computing
(infinite) normal forms of terms. Termination of such
{\em canonical} csr is desirable when using csr for
these purposes. Existing transformations for proving
termination of csr fulfill a number of new properties
when used for proving termination of canonical csr.",
}
@InProceedings{CharatonikTalbot:RTA:2002,
author = "Witold Charatonik and Jean-Marc Talbot",
title = "Atomic Set Constraints with Projection",
crossref = "RTA:2002",
pages = "311--325",
abstract = "We investigate a class of set constraints defined as
atomic set constraints augmented with projection. This
class subsumes some already studied classes such as
atomic set constraints with left-hand side projection
and INES constraints. All these classes enjoy the nice
property that satisfiability can be tested in cubic
time. This is in contrast to several other classes of
set constraints, such as definite set constraints and
positive set constraints, for which satisfiability
ranges from DEXPTIME-complete to NEXPTIME-complete.
However, these latter classes allow set operators such
as intersection or union which is not the case for the
class studied here. In the case of atomic set
constraints with projection one might expect that
satisfiability remains polynomial. Unfortunately, we
show that the satisfiability problem for this
class is no longer polynomial, but CoNP-hard.
Furthermore, we devise a PSPACE algorithm to solve this
satisfiability problem.",
}
@InProceedings{LevyVillaret:RTA:2002,
author = "Jordi Levy and Mateu Villaret",
title = "Currying Second-Order Unification",
crossref = "RTA:2002",
pages = "326--339",
abstract = "The Curry form of a term, like $f(a,b)$, allows us to
write it, using just a single binary function symbol,
as $@(@(f,a),b)$. Using this technique we prove that
the signature is not relevant in second-order
unification, and conclude that one binary symbol is
enough. \\
By currying variable applications, like $X(a)$, as
$@(X,a)$, we can transform second-order terms into
first-order terms, but we have to add beta-reduction as
a theory. This is roughly what is done in explicit
unification. We prove that by currying only constant
applications we can reduce second-order unification to
second-order unification with just one binary function
symbol. Both problems are already known to be
undecidable, but applying the same idea to context
unification, for which decidability is still unknown,
we reduce the problem to context unification with just
one binary function symbol. \\
We also discuss about the difficulties of applying the
same ideas to third or higher order unification.",
}
@InProceedings{Wierzbicki:RTA:2002,
author = "Daniel J. Dougherty and Tomasz Wierzbicki",
title = "A Decidable Variant of Higher Order Matching",
crossref = "RTA:2002",
pages = "340--351",
abstract = "A lambda term is \emph{$k$-duplicating} if every
occurrence of a lambda abstractor binds at most $k$
variable occurrences. We prove that the problem of
higher order matching where solutions are required to
be $k$-duplicating (but with no constraints on the
problem instance itself) is decidable. We also show
that the problem of higher order matching in the affine
lambda calculus (where both the problem instance and
the solutions are constrained to be 1-duplicating) is
in NP, generalizing de~Groote's result for the linear
lambda calculus.",
}
@InProceedings{BaaderTinelli:RTA:2002,
author = "Franz Baader and Cesare Tinelli",
title = "Combining Decision Procedures for Positive Theories
Sharing Constructors",
crossref = "RTA:2002",
pages = "352--366",
abstract = "This paper addresses the following combination
problem: given two equational theories $E_1$ and $E_2$
whose positive theories are decidable, how can one
obtain a decision procedure for the positive theory of
$E_1\cup E_2$? For theories over disjoint signatures,
this problem was solved by Baader and Schulz in 1995.
\\
This paper is a first step towards extending this
result to the case of theories sharing constructors.
Since there is a close connection between positive
theories and unification problems, this also extends to
the non-disjoint case the work on combining decision
procedures for unification modulo equational
theories.",
}
@InProceedings{Pol:RTA:2002,
author = "Jaco van de Pol",
title = "{JIT}ty: {A} rewriter with strategy annotations",
type = "Tool Demonstration",
crossref = "RTA:2002",
pages = "367--370",
abstract = "We demonstrate {\tt JITty}, a simple rewrite
implementation with strategy annotations, along the
lines of the Just-In-Time rewrite strategy. Our tool
has the following distinguishing features: \\ - It
provides the flexibility of user defined strategy
annotations, which specify the order of normalizing
arguments and applying rewrite rules. \\ - Strategy
annotations are checked for correctness, and it is
guaranteed that all produced results are normal forms
w.r.t.~the underlying TRS. \\ - The tool is
`light-weight' with compact but fast code. \\ - A TRS
is interpreted, rather than compiled, so the tool has a
short start-up time and is portable to many
platforms.",
}
@InProceedings{Durand:RTA:2002,
author = "Ir{\`e}ne Durand",
title = "Autowrite",
type = "Tool Demonstration",
crossref = "RTA:2002",
pages = "371--375",
abstract = "Autowrite is an experimental tool written in Common
Lisp for checking properties of TRSs. It was initially
designed to check sequentiality properties of TRSs. For
this purpose, it implements the tree automata
constructions used in [J96,DM97,DM98,NT99] and many
useful operations on terms, TRSs and tree automata
(unfortunately not all yet integrated into the
graphical interface).",
}
@InProceedings{RetyLecland:RTA:2002,
author = "Benoit Lecland and Pierre R{\'e}ty",
title = "{TTSLI}: an Implementation of Tree-Tuple Synchronized
Languages",
type = "Tool Demonstration",
crossref = "RTA:2002",
pages = "376--379",
abstract = "Tree-Tuple Synchronized Languages have first been
introduced by means of Tree-Tuple Synchronized Grammars
(TTSG), and have been reformulated recently by means of
(so-called) Constraint Systems (CS), which allowed to
prove more properties. \\
TTSLI is an implementation of constraint systems,
together with the main operations. It is written in
Java, and is available on Lecland's web page.",
}
@InProceedings{Lippi:RTA:2002,
author = "Sylvain Lippi",
title = "in$^2$: {A} Graphical Interpreter for Interaction
Nets",
type = "Tool Demonstration",
crossref = "RTA:2002",
pages = "380--385",
abstract = "in$^2$ can be considered as an attractive and didactic
tool to approach the interaction net paradigm. But it
is also an implementation in $C$ of the core of a real
programming language featuring a user-friendly
graphical syntax and an efficient garbage collector
free execution.",
}
@Proceedings{RTA:2002,
editor = "Sophie Tison",
title = "Rewriting Techniques and Applications",
booktitle = "Rewriting Techniques and Applications",
conference = "13th International Conference",
key = "RTA",
year = "2002",
month = jul # " 22--24",
venue = "Copenhagen, Denmark",
series = "Lecture Notes in Computer Science",
volume = "2378",
publisher = "Springer-Verlag",
ISBN = "3-540-43916-1",
ISSN = "0302-9743",
}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RV - Runtime Verification
% ====================
@InProceedings{BhargavanGunter:RV:2002,
author = "Karthikeyan Bhargavan and Carl A. Gunter",
title = "Requirements for a Practical Network Event Recognition
Language",
crossref = "RV:2002",
pages = "1--20",
}
@InProceedings{BroerkensMoeller:RV:2002,
author = "Mark Br{\"o}rkens and Michael M{\"o}ller",
title = "Dynamic Event Generation for Runtime Checking using
the {JDI}",
crossref = "RV:2002",
pages = "21--35",
}
@InProceedings{FinkbeinerSankaranarayananSipma:RV:2002,
author = "Bernd Finkbeiner and Sriram Sankaranarayanan and Henny
B. Sipma",
title = "Collecting Statistics over Runtime Executions",
crossref = "RV:2002",
pages = "36--55",
}
@InProceedings{KaraormanAbercrombie:RV:2002,
author = "Murat Karaorman and Parker Abercrombie",
title = "j{C}ontractor: Bytecode Instrumentation Techniques for
Implementing Design by Contract in {J}ava",
crossref = "RV:2002",
pages = "56--80",
}
@InProceedings{KimKannanLeeSokolskyViswanathan:RV:2002,
author = "Moonjoo Kim and Sampath Kannan and Insup Lee and Oleg
Sokolsky and Mahesh Viswanathan",
title = "Computational Analysis of Run-time Monitoring",
crossref = "RV:2002",
pages = "81--95",
}
@InProceedings{KimLeeSammapunShin:RV:2002,
author = "Moonjoo Kim and Insup Lee and Usa Sammapun and Jangwoo
Shin and Oleg Sokolsky",
title = "Monitoring, Checking, and Steering of Real-Time
Systems",
crossref = "RV:2002",
pages = "96--112",
}
@InProceedings{LevySaidiUribe:RV:2002,
author = "Joshua Levy and Hassen Sa{\"\i}di and Thomas Uribe",
title = "Combining Monitors for Run-time System Verification",
crossref = "RV:2002",
pages = "113--128",
}
@InProceedings{Peled:RV:2002,
author = "Doron A. Peled",
title = "Tracing the executions of concurrent programs",
type = "Invited talk",
crossref = "RV:2002",
pages = "129--142",
}
@InProceedings{Stoller:RV:2002,
author = "Scott Stoller",
title = "Testing Concurrent {J}ava Programs using Randomized
Scheduling",
crossref = "RV:2002",
pages = "143--158",
}
@InProceedings{YongHorwitz:RV:2002,
author = "Suan Hsi Yong and Susan Horwitz",
title = "Reducing the Overhead of Dynamic Analysis",
crossref = "RV:2002",
pages = "159--179",
}
@InProceedings{ZuckPnueliGoldbergHu:RV:2002,
author = "Lenore Zuck and Amir Pnueli and Yi Fang and Benjamin
Goldberg and Ying Hu",
title = "Translation and Run-Time Validation of Optimized
Code",
crossref = "RV:2002",
pages = "180--201",
}
@Proceedings{RV:2002,
editor = "Klaus Havelund and Grigore Ro{\c{s}}u",
title = "Runtime Verification",
booktitle = "Runtime Verification",
conference = "satellite workshop to CAV'02",
key = "RV",
pages = "iv+20",
year = "2002",
month = jul # " 26",
venue = "Copenhagen, Denmark",
series = "Electronic Notes in Theoretical Computer Science",
volume = "70",
publisher = "Elsevier Science",
note = "Also DIKU technical report 02-14 from University of
Copenhagen",
}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SAVE - Specification, Analysis and Validation for Emerging Technologies in...
% ======================================================================
@InProceedings{MC1-Sharygina:SAVE:2002,
author = "Natasha Sharygina and James C. Browne",
title = "Model Checking Large-Scale Software via Abstraction of
Loop Transitions",
crossref = "SAVE:2002",
pages = "(unpaginated)",
abstract = "This paper outlines an on-going project on the
abstract data model checking of large-scale programs.
The focus of the paper is the data abstraction
algorithm that is targeted to minimize the contribution
of the loop executions to the program state space. The
loop abstraction is defined as the syntactic program
transformation that results in the sound representation
of the concrete program. We demonstrate the loop
abstraction algorithm in the context of the integrated
software design and model-checking.",
}
@InProceedings{MC2-Armando:SAVE:2002,
author = "Alessandro Armando and Pasquale De Lucia",
title = "Symbolic Model-Checking of Linear Programs",
crossref = "SAVE:2002",
pages = "(unpaginated)",
abstract = "A fundamental problem in the development of
model-checking techniques for sequential programs is
the identification of models (analogous to the finite
state machines used for modeling hardware circuits) for
which reasonably simple abstractions from conventional
programming languages (such as C and Java) as well as
efficient model-checking procedures do exist. Previous
work developed at Microsoft Research proposed boolean
programs as a model for sequential programs and a
model-checking procedure for this family of programs.
In this paper we build on top and extend on these ideas
and propose linear programs as an alternative model for
sequential programs. We argue that linear programs
offer a better model in many cases as they provide a
level of abstraction closer to programs arising in many
practical situations. We show how a model-checking
procedure for linear programs can be built on top of a
constraint solver for linear arithmetics and present
some experimental results obtained with our prototype
implementation of the model-checker.",
}
@InProceedings{Babylon:SAVE:2002,
author = "Pierluigi Ammirati and Giorgio Delzanno and Pierre
Ganty and Gilles Geeraerts and Jean-Fran{\c c}ois
Raskin and Laurent Van Begin",
title = "Babylon: An Integrated Toolkit for the Specification
and Verification of Parameterized Systems",
crossref = "SAVE:2002",
pages = "(unpaginated)",
}
@InProceedings{S1-Aziz:SAVE:2002,
author = "Benjamin Aziz and G. W. Hamilton",
title = "A Privacy Analysis for the $\pi$-calculus: The
Denotational Approach",
crossref = "SAVE:2002",
pages = "(unpaginated)",
abstract = "We present a non-uniform static analysis for the
$\pi$-calculus that is built on a denotational
semantics of the language and is useful in detecting
instances of information leakage and insecure
communications in mobile systems with multi-level
security policies. To ensure the termination of the
analysis, we propose a safe abstraction, which ensures
a finite number of names are generated by any process.
We also describe a tool called Picasso that implements
the analysis.",
}
@InProceedings{S2-Martinelli:SAVE:2002,
author = "Fabio Martinelli",
title = "Symbolic Partial Model Checking for Security
Analysis",
crossref = "SAVE:2002",
pages = "(unpaginated)",
}
@InProceedings{S3-Etalle:SAVE:2002,
author = "R. Corin and Sandro Etalle",
title = "An improved Constraint-Based System for the
verification of Security Protocols",
crossref = "SAVE:2002",
pages = "(unpaginated)",
}
@Proceedings{SAVE:2002,
editor = "Giorgio Delzanno and Sandro Etalle and Maurizio
Gabbrielli",
title = "Specification, Analysis and Validation for Emerging
Technologies in Computational Logic",
booktitle = "Specification, Analysis and Validation for Emerging
Technologies in Computational Logic",
conference = "2nd International Workshop",
key = "SAVE",
year = "2002",
month = jul # " 27",
venue = "Copenhagen, Denmark",
series = "Datalogiske Skrifter",
volume = "94",
institution = "Roskilde University",
address = "Roskilde, Denmark",
ISSN = "0109-9779",
}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TABLEAUX - Automated Reasoning with Analytic Tableaux and Related Methods
% ==============================================================
@InProceedings{Baaz:FLoC:2002:CADE+*TABLEAUX,
author = "Matthias Baaz",
title = "Proof Analysis by Resolution",
type = "Invited talk",
crossref = "TABLEAUX:2002",
pages = "1",
abstract = "Proof analysis of existing proofs is one of the main
sources of scientific progress in mathematics: new
concepts can be obtained e.g. by denoting explicit
definitions in proof parts and axiomatizing them as new
mathematical objects in their own right. (The
development of the concept of integral is a well known
example.) All forms of proof analysis are intended to
make information implicit in a proof explicit i.e.
visible. Logical proof analysis is mainly concerned
with the implicit constructive content of more or less
formalized proofs. In this paper, we concentrate on
automatizable logical proof analysis in first-order
logic by means of incorporating resolution.
}
@InProceedings{Miller:TABLEAUX:2002,
author = "Dale Miller and Elaine Pimentel",
title = "Using linear logic to reason about sequent systems",
type = "Invited talk",
crossref = "TABLEAUX:2002",
pages = "2--23",
abstract = "Linear logic can be used as a meta-logic for the
specification of some sequent calculus proof systems.
We explore in this paper properties of such linear
logic specifications. We show that derivability of one
proof system from another has a simple decision
procedure that is implemented simply via bounded logic
programming search. We also provide conditions to
ensure that an encoded proof system has the
cut-elimination property and show that this can be
decided again by simple, bounded proof search
algorithms.",
}
@InProceedings{BaazCiabattoni:TABLEAUX:2002,
author = "Matthias Baaz and Agata Ciabattoni",
title = "A {S}ch{\"u}tte-{T}ait style cut-elimination proof for
first-order {G}{\"o}del logic",
crossref = "TABLEAUX:2002",
pages = "24--37",
abstract = "We present a Sch{\"u}tte-Tait style cut-elimination
proof for the hypersequent calculus {\bf HIF} for
first-order G{\"o}del logic. This proof allows to bound
the depth of the resulting cut-free derivation by
$4_{\rho(d)}^{|d|}$, where $|d|$ is the depth of the
original derivation and $\rho(d)$ the maximal
complexity of cut-formulas in it. We compare this
Sch{\"u}tte-Tait style cut-elimination proof to a
Gentzen style proof.",
}
@InProceedings{BlackburnMarx:TABLEAUX:2002,
author = "Patrick Blackburn and Maarten Marx",
title = "Tableaux for Quantified Hybrid Logic",
crossref = "TABLEAUX:2002",
pages = "38--52",
abstract = "We present a (sound and complete) tableau calculus for
Quantified Hybrid Logic ({\it QHL}). {\it QHL} is an
extension of orthodox quantified modal logic: as well
as the usual $\Box$ and $\Diamond$ modalities it
contains names for (and variables over) states,
operators $@_s$ for asserting that a formula holds at a
named state, and a binder $\downarrow$ that binds a
variable to the current state. The first-order
component contains equality and rigid and non-rigid
designators. As far as we are aware, ours is the first
tableau system for {\it QHL}. \\
Completeness is established via a variant of the
standard translation to first-order logic. More
concretely, a valid {\it QHL}-sentence is translated
into a valid first-order sentence in the correspondence
language. As it is valid, there exists a first-order
tableau proof for it. This tableau proof is then
converted into a {\it QHL} tableau proof for the
original sentence. In this way we recycle a well-known
result (completeness of first-order logic) instead of a
well-known proof. \\
The tableau calculus is highly flexible. We only
present it for the constant domain semantics, but
slight changes render it complete for varying,
expanding or contracting domains. Moreover,
completeness with respect to specific frame classes can
be obtained simply by adding extra rules or axioms
(this can be done for every first-order definable class
of frames which is closed under and reflects generated
subframes).",
}
@InProceedings{Chetcuti-Sperandio:TABLEAUX:2002,
author = "Nathalie Chetcuti-Sperandio",
title = "Tableau-Based Automated Deduction for Duration
Calculus",
crossref = "TABLEAUX:2002",
pages = "53--69",
abstract = "Duration Calculus is a temporal logic introduced to
specify real-time systems. It is a very expressive but
undecidable logic. In this paper we turn our attention
to a decidable fragment for which we develop a
tableau-based decision method taking into account some
semantic restrictions.",
}
@InProceedings{CialdeaMayerLimongelli:TABLEAUX:2002,
author = "Marta Cialdea Mayer and Carla Limongelli",
title = "Linear Time Logic, Conditioned Models and Planning
with Incomplete Knowledge",
crossref = "TABLEAUX:2002",
pages = "70--84",
abstract = "The ``planning as satisfiability'' paradigm, which
reduces solving a planning problem $P$ to the search of
a model of a logical description of $P$, relies on the
assumption that the agent has complete knowledge and
control over the world. This work faces the problem of
planning in the presence of incomplete information
and/or exogenous events, still keeping inside the
``planning as satisfiability'' paradigm, in the context
of linear time logic. \\
We give a logical characterization of a ``conditioned
model'', which represents a plan solving a given
problem together with a set of ``conditions'' that
guarantee its executability. During execution,
conditions have to be checked by means of sensing
actions. When a condition turns out to be false, a
different ``conditioned plan'' must be considered. A
whole conditional plan is represented by a set of
conditioned models. The interest of splitting a
conditional plan into significant sub-parts is due to
the heavy computational complexity of conditional
planning. \\
The paper presents an extension of the standard tableau
calculus for linear time logic, allowing one to extract
from a single open branch a conditioned model of the
initial set of formulae, i.e. a partial description of
a model and a set of conditions $U$ guaranteeing its
``executability''. As can be expected, if $U$ is
required to be minimal, the analysis of a single branch
is not sufficient. We show how a global view on the
whole tableau can be used to prune $U$ from redundant
conditions. In any case, if the calculus is to be used
with the aim of producing the whole conditional plan
off-line, a complete tableau must be built. On the
other hand, a single conditioned model can be used when
planning and execution (with sensing actions) are
intermingled. In that case, the requirement for
minimality can reasonably be relaxed.",
}
@InProceedings{DegtyarevFisherKonev:TABLEAUX:2002,
author = "Anatoli Degtyarev and Michael Fisher and Boris Konev",
title = "A Simplified Clausal Resolution Procedure for
Propositional Linear-Time Temporal Logic",
crossref = "TABLEAUX:2002",
pages = "85--99",
abstract = "The clausal resolution method for propositional
linear-time temporal logics is well known and provides
the basis for a number of temporal provers. The method
is based on an intuitive clausal form, called SNF,
comprising three main clause types and a small number
of resolution rules. In this paper, we show how the
normal form can be radically simplified and,
consequently, how a simplified clausal resolution
method can be defined for this important variety of
logic.",
}
@InProceedings{EiterKlotzTompitsWoltran:TABLEAUX:2002,
author = "Thomas Eiter and Volker Klotz and Hans Tompits and
Stefan Woltran",
title = "Modal Nonmonotonic Logics Revisited: Efficient
Encodings for the Basic Reasoning Tasks",
crossref = "TABLEAUX:2002",
pages = "100--114",
abstract = "Modal nonmonotonic logics constitute a well-known
family of knowledge-representation formalisms capturing
ideally rational agents reasoning about their own
beliefs. Although these formalisms are extensively
studied from a theoretical point of view, most of these
approaches lack generally available solvers thus far.
In this paper, we show how variants of Moore's
autoepistemic logic can be axiomatised by means of
quantified Boolean formulas (QBFs). More specifically,
we provide polynomial reductions of the basic reasoning
tasks associated with these logics into the evaluation
problem of QBFs. Since there are now efficient
QBF-solvers, this reduction technique yields a
practicably relevant approach to build prototype
reasoning systems for these formalisms. We incorporated
our encodings within the system {\sf QUIP} and tested
their performance on a class of benchmark problems
using different underlying QBF-solvers.",
}
@InProceedings{FerrariFiorentiniFiorino:TABLEAUX:2002,
author = "Mauro Ferrari and Camillo Fiorentini and Guido
Fiorino",
title = "Tableau calculi for the logics of finite k-ary trees",
crossref = "TABLEAUX:2002",
pages = "115--129",
abstract = "We present tableau calculi for the logics
$\mathbf{D}_k$ ($k\geq 2$) semantically characterized
by the classes of Kripke models built on finite $k$-ary
trees. Our tableau calculi use the signs $\mathbf{T}$
and $\mathbf{F}$, some tableau rules for Intuitionistic
Logic and two rules formulated in a hypertableau
fashion. We prove the Soundness and Completeness
Theorems for our calculi. Finally, we use them to prove
the main properties of the logics $\mathbf{D}_k$, in
particular their constructivity and their
decidability.",
}
@InProceedings{Giese:TABLEAUX:2002,
author = "Martin Giese",
title = "A Model Generation Style Completeness Proof for
Constraint Tableaux with Superposition",
crossref = "TABLEAUX:2002",
pages = "130--144",
abstract = "We present a calculus that integrates equality
handling by superposition into a free variable tableau
calculus. We prove completeness of this calculus by an
adaptation of the model generation technique commonly
used for completeness proofs of resolution calculi,
e.g. by Bachmair and Ganzinger or Nieuwenhuis and
Rubio. The calculi and the completeness proof are
compared to earlier results of Degtyarev and
Voronkov.",
}
@InProceedings{Hladik:TABLEAUX:2002,
author = "Jan Hladik",
title = "Implementation and Optimization of a Tableau Algorithm
for the Guarded Fragment",
crossref = "TABLEAUX:2002",
pages = "145--159",
abstract = "In this paper, we present \textsc{Saga}, the
implementation of a tableau-based
\textbf{S}atisfiability \textbf{A}lgorithm for the
\textbf{G}u\textbf{a}rded Fragment ($\mathcal{GF}$).
Satisfiability for $\mathcal{GF}$ with finite signature
is \textsc{ExpTime}-complete and therefore
theoretically intractable, but existing tableau-based
systems for \textsc{ExpTime}-complete description and
modal logics perform well for many realistic knowledge
bases. We implemented and evaluated several
optimisations used in description logic systems, and
our results show that with an efficient combination,
\textsc{Saga} can compete with existing highly
optimised systems for description logics and first
order logic.",
}
@InProceedings{Letz:TABLEAUX:2002,
author = "Reinhold Letz",
title = "Lemma and Model Caching in Decision Procedures for
Quantified Boolean Formulas",
crossref = "TABLEAUX:2002",
pages = "160--175",
abstract = "The increasing role of quantified Boolean logic in
many applications calls for practically efficient
decision procedures. One of the most promising
paradigms is the semantic tree format implemented in
the style of the DPLL procedure. In this paper,
so-called learning techniques like intelligent
backtracking and caching of lemmas which proved useful
in the pure propositional case are generalised to the
quantified Boolean case and the occurring differences
are discussed. Due to the strong restriction of the
variable selection in semantic tree procedures for
quantified Boolean formulas, learning methods are more
important than in the propositional case, as we
demonstrate. Furthermore, in addition to the caching of
lemmas, significant advances can be achieved by
techniques based on the caching of models, too. The
theoretical effect of these improvements is illustrated
by a comparison of the search spaces on pathological
examples. We also describe the basic features of the
system Semprop, which is an efficient implementation of
(some of) the developed techniques, and give the
results of an experimental evaluation of the system on
a number of practical examples.",
}
@InProceedings{LetzStenz:TABLEAUX:2002,
author = "Reinhold Letz and Gernot Stenz",
title = "Integration of Equality Reasoning into the
Disconnection Calculus",
crossref = "TABLEAUX:2002",
pages = "176--190",
abstract = "Equality handling has always been a traditional
weakness of tableau calculi because the typical
refinements of those calculi were not compatible with
the most successful methods for equality handling. The
disconnection tableau calculus represents a new
confluent framework well suited for the integration of
a large class of different methods for equality
handling, as we demonstrate in this paper. We consider
both saturation based and goal-oriented methods for
equality handling. We also show how specialized
equality handling can affect the properties of the
calculus at the example of the well-known regularity
condition. All the presented approaches of equality
handling have been implemented in the theorem prover
DCTP and we present the results of an experimental
evaluation.",
}
@InProceedings{MetcalfeOlivettiGabbay:TABLEAUX:2002,
author = "George Metcalfe and Nicola Olivetti and Dov Gabbay",
title = "Analytic sequent calculi for abelian and {\L}ukasiewicz
logics",
crossref = "TABLEAUX:2002",
pages = "191--205",
abstract = "In this paper we present the first labelled and
unlabelled analytic sequent calculi for abelian logic
A, the logic of lattice-ordered abelian groups with
characteristic model Z, motivated by Meyer and Slaney as
a logic of relevance and Casari as a logic of
comparison. We also show that the so-called material
fragment of A coincides with {\L}ukasiewicz's
infinite-valued logic L, hence giving us as a
significant by-product, labelled and unlabelled
analytic sequent calculi for L.",
}
@InProceedings{Nguyen:TABLEAUX:2002,
author = "Linh Anh Nguyen",
title = "Analytic Tableau Systems for Propositional Bimodal
Logics of Knowledge and Belief",
crossref = "TABLEAUX:2002",
pages = "206--220",
abstract = "We give sound and complete analytic tableau systems
for the propositional bimodal logics $\mathbb{KB}$,
$\mathbb{KB}_{\!^{\_}C}$, $\mathbb{KB}_{\!^{\_}5}$, and
$\mathbb{KB}_{\!^{\_}5C}$. These logics have two
universal modal operators $\mathbb{K}\,$ and
$\mathbb{B}\,$, where $\mathbb{K}\,$ stands for knowing
and $\mathbb{B}\,$ stands for believing. The logic
$\mathbb{KB}$ is a combination of the modal logic
\textit{S}5 (for $\mathbb{K}\,$) and \textit{KD}45 (for
$\mathbb{B}\,$) with the interaction axioms $I:
\mathbb{K}\,\phi \to \mathbb{B}\,\phi$ and $C:
\mathbb{B}\,\phi \to \mathbb{K}\,\mathbb{B}\,\phi$. The
logics $\mathbb{KB}_{\!^{\_}C}$,
$\mathbb{KB}_{\!^{\_}5}$, $\mathbb{KB}_{\!^{\_}5C}$ are
obtained from $\mathbb{KB}$ respectively by deleting
the axiom $C$ (for $\mathbb{KB}_{\!^{\_}C}$), the
axioms 5 (for $\mathbb{KB}_{\!^{\_}5}$), and both of
the axioms $C$ and 5 (for $\mathbb{KB}_{\!^{\_}5C}$).
As analytic sequent-like tableau systems, our calculi
give simple decision procedures for reasoning about
both knowledge and belief in the mentioned logics.",
}
@InProceedings{Petermann:TABLEAUX:2002,
author = "Uwe Petermann",
title = "A Confluent Theory Connection Calculus",
crossref = "TABLEAUX:2002",
pages = "221--234",
abstract = "In the present paper we combine two different
enhancements of connection method based theorem proving
calculi: a confluent connection calculus of
Baumgartner, Eisinger and Furbach and our approach for
building-in theories into connection calculi.",
}
@InProceedings{Sofronie-Stokkermans:TABLEAUX:2002,
author = "Viorica Sofronie-Stokkermans",
title = "Deciding uniform word problems involving bridging
operators on bounded distributive lattices",
crossref = "TABLEAUX:2002",
pages = "235--249",
abstract = "In this paper we analyze some fragments of the
universal theory of distributive lattices with many
sorted bridging operators. Our interest in such
algebras is motivated by the fact that, in description
logics, numerical features are often expressed by using
maps that associate numerical values to sets (more
generally, to lattice elements). We first establish a
link between satisfiability of universal sentences with
respect to algebraic models and satisfiability with
respect to certain classes of relational structures. We
use these results for giving a method for translation
to clause form of universal sentences, and provide some
decidability results based on the use of resolution or
hyperresolution. Links between hyperresolution and
tableau methods are also discussed, and a tableau
procedure for checking satisfiability of formulae of
type $t_1 \leq t_2$ is obtained by using a
hyperresolution calculus.",
}
@InProceedings{CateShan:TABLEAUX:2002,
author = "Balder ten Cate and Chung-chieh Shan",
title = "Question answering: from partitions to prolog",
crossref = "TABLEAUX:2002",
pages = "251--265",
abstract = "We implement Groenendijk and Stokhof's partition
semantics of questions in a simple question answering
algorithm. The algorithm is sound, complete, and based
on tableau theorem proving. The algorithm relies on a
syntactic characterization of answerhood: Any answer to
a question is equivalent to some formula built up only
from instances of the question. We prove this
characterization by translating the logic of
interrogation to classical predicate logic and applying
Craig's interpolation theorem.",
}
@InProceedings{ThionCerritoCialdeaMayer:TABLEAUX:2002,
author = "Virginie Thion and Serenella Cerrito and Marta Cialdea
Mayer",
title = "A General Theorem Prover for Quantified Modal Logics",
crossref = "TABLEAUX:2002",
pages = "266--280",
abstract = "The main contribution of this work is twofold. It
presents a modular tableau calculus, in the
free-variable style, treating the main domain variants
of quantified modal logic and dealing with languages
where rigid and non-rigid designation can coexist. The
calculus uses, to this end, light and simple semantical
annotations. Such a general proof-system results from
the fusion into a unified framework of two calculi
previously defined by the second and third authors.
Moreover, the work presents a theorem prover, called
{\bf GQML-Prover}, based on such a calculus, which is
accessible in the Internet. The fair deterministic
proof-search strategy used by the prover is described
and illustrated via a meaningful example.",
}
@InProceedings{Willard:TABLEAUX:2002,
author = "Dan E. Willard",
title = "Some New Exceptions for the Semantic Tableaux Version
of the Second Incompleteness Theorem",
crossref = "TABLEAUX:2002",
pages = "281--297",
abstract = "This article continues our study of axiom systems that
can verify their own consistency and prove all Peano
Arithmetic's $\Pi_1$ theorems. We will develop some new
types of exceptions for the Semantic Tableaux Version
of the Second Incompleteness Theorem.",
}
@InProceedings{Wirth:TABLEAUX:2002,
author = "Claus-Peter Wirth",
title = "A New Indefinite Semantics for {H}ilbert's epsilon",
crossref = "TABLEAUX:2002",
pages = "298--314",
abstract = "After reviewing the literature on semantics of
Hilbert's epsilon symbol, we present a new one that is
similar to the referential interpretation of indefinite
articles in natural languages.",
}
@InProceedings{Zarba:TABLEAUX:2002,
author = "Calogero G. Zarba",
title = "A Tableau Calculus for Combining Non-Disjoint
Theories",
crossref = "TABLEAUX:2002",
pages = "315--329",
abstract = "The Nelson-Oppen combination method combines ground
satisfiability checkers for first-order theories
satisfying certain conditions into a single ground
satisfiability checker for the union theory. The most
significant restriction that the combined theories must
satisfy, for the Nelson-Oppen combination method to be
applicable, is that they must have disjoint signatures.
Unfortunately, this is a very serious restriction since
many combination problems concern theories over
non-disjoint signatures. \\
In this paper we present a tableau calculus for
combining first-order theories over non-disjoint
signatures. The calculus generalizes the Nelson-Oppen
combination method to formulae with quantifiers and to
the union of \emph{arbitrary} theories over not
necessarily disjoint signatures.",
}
@InProceedings{HabertNotinGalmiche:TABLEAUX:2002,
author = "Luc Habert and Jean-Marc Notin and Didier Galmiche",
title = "{LINK}: a Proof Environment based on Proof Nets",
type = "System Description",
crossref = "TABLEAUX:2002",
pages = "330--334",
abstract = "LINK is a proof environment including proof nets-based
provers for multiplicative linear logics: mixed linear
logic, or recently called non-commutative logic (MNL),
commutative linear logic (MLL) and non-commutative (or
cyclic) linear logic (MCyLL). Its main characteristic
is the provability analysis through automatic proof
nets construction, that appears as a powerful
alternative to deal with resource management in proof
search. These provers can be also seen as
implementations of new connection methods for these
linear logic fragments.",
}
@InProceedings{Stenz:TABLEAUX:2002,
author = "Gernot Stenz",
title = "{DCTP} 1.2",
type = "System Description",
crossref = "TABLEAUX:2002",
pages = "335--340",
abstract = "We describe version 1.2 of the theorem prover DCTP,
which is an implementation of the disconnection
calculus. The disconnection calculus is a confluent
tableau method using non-rigid variables. This current
version of DCTP has been extended and enhanced
significantly since its participation in the IJCAR
system competition in 2001. We briefly sketch the
underlying calculus and the proof procedure and
describe some of its refinements and new features. We
also present the results of some experiments regarding
these new features.",
}
@Proceedings{TABLEAUX:2002,
editor = "Uwe Egly and Christian G. Ferm{\"u}ller",
title = "Automated Reasoning with Analytic Tableaux and Related
Methods",
booktitle = "Automated Reasoning with Analytic Tableaux and Related
Methods",
conference = "International Conference",
key = "TABLEAUX",
year = "2002",
month = jul # " 30--" # aug # " 1",
venue = "Copenhagen, Denmark",
series = "Lecture Notes in Computer Science",
volume = "2381",
publisher = "Springer-Verlag",
ISBN = "3-540-43929-3",
ISSN = "0302-9743",
}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% UNIF - 16th International Workshop on Unification
% ==========================================
@InProceedings{Kohlhase:UNIF:2002,
author = "Michael Kohlhase",
title = "Sorted Higher-Order Unification",
type = "Invited talk",
crossref = "UNIF:2002",
pages = "4",
abstract = "In recent years, many theorem proving systems have
been extended to higher-order logic (in particular
based on variants of the simply typed lambda calculus),
not the least, since the built-in notion of beta-eta
reduction gives a convenient and transparent definition
mechanism. The drawback of this approach is that the
main computational device (higher-order unification,
which is undecidable in general), becomes a major
bottleneck. \\
The main assumption in this talk is that the
introduction of additional knowledge into higher-order
unification will make the search for higher-order
unifiers (and thus for proofs in higher-order logic)
more guided and ultimately yield more efficient
higher-order theorem provers. The source of knowledge
that we want to explore in this talk is taxonomic
information given in a concept hierarchy. \\
Concretely, we will introduce a sorted lambda-calculus
where the universe is subdivided into various subsets
(called sorts), and the signature is augmented by sort
information. We introduce the technical device of term
declarations, which give a very expressive mechanism
for specifying (and using) sort information. \\
The central result will be a unification algorithm for
this logic and some decidability results for the
higher-order pattern fragment. \\
We will conclude the talk with a brief presentation of
two applications of sorted higher-order unification
outside theorem proving: natural language semantics and
formula retrieval from knowledge bases.",
}
@InProceedings{SchmidtSchaussSchulz:UNIF:2002,
author = "Manfred Schmidt-Schau{\ss} and Klaus U. Schulz",
title = "Decidability of Bounded Higher-Order Unification",
crossref = "UNIF:2002",
pages = "5--8",
abstract = "Unifiability of terms in the simply typed lambda
calculus with $\beta$ and $\eta$ rules becomes
decidable if there is a bound on the number of bound
variables and lambdas in a unifier in $\eta$-long
$\beta$-normal form.",
}
@InProceedings{Hamana:UNIF:2002,
author = "Makoto Hamana",
title = "Simple $\beta_0$-Unification for Terms with Context
Holes",
crossref = "UNIF:2002",
pages = "9--13",
}
@InProceedings{NiehrenVillaret:UNIF:2002,
author = "Joachim Niehren and Mateu Villaret",
title = "Parallelism and Tree Regular Constraints",
crossref = "UNIF:2002",
pages = "14--17",
abstract = "Parallelism constraints are logical descriptions of
trees. Parallelism constraints subsume dominance
constraints and are equally expressive to context
unification. Parallelism constraints belong to the
constraint language for lambda structures (CLLS) which
serves for modeling natural language semantics. In this
paper, we investigate the extension of parallelism
constraints by tree regular constraints. This canonical
extension is subsumed by the monadic second-order logic
over parallelism constraints. We analyze the precise
expressiveness of this extension on basis of a new
relationship between tree automata and logic. Our
result is relevant for classifying different extensions
of parallelism constraints, as in CLLS. Finally, we
prove that parallelism constraints and context
unification remain equivalent when extended with tree
regular constraints.",
}
@InProceedings{Monate:UNIF:2002,
author = "Benjamin Monate",
title = "Parameterized String Rewriting Systems",
crossref = "UNIF:2002",
pages = "18--20",
abstract = "We propose a new formalism for rewriting in order to
describe infinite families of constrained string
rewriting systems: parameterized string rewriting.
Moreover, these systems may include parametric
exponents or finite products. Our main result is a
critical pair lemma allowing decision of local
confluence. We apply this procedure to check
automatically local confluence of parameterized
rewriting presentations of some well-known families of
monoids and groups.",
}
@InProceedings{OhsakiTakai:UNIF:2002,
author = "Hitoshi Ohsaki and Toshinori Takai",
title = "A Tree Automata Theory for Unification Modulo
Equational Rewriting",
crossref = "UNIF:2002",
pages = "21--25",
abstract = "An extension of the tree automata framework, called
\emph{equational tree automata}, is presented. This
theory is useful to deal with unification modulo
equational rewriting. We demonstrate how equational
tree automata can be applied to unification problems.",
}
@InProceedings{GhilardiSacchetti:UNIF:2002,
author = "Silvio Ghilardi and Lorenzo Sacchetti",
title = "Filtering Unification: the Case of Modal Logic",
crossref = "UNIF:2002",
pages = "26--30",
abstract = "We deal with the case in which $E$-unification is {\it
filtering}: this means that, given two solutions to a
unification problem, there is always another one which
is better than both of them. In this quite curious
situation, unification problems can either be very nice
(i.e. they admit a most general unifier), or extremely
bad (no bases of unifiers exist). Examples are
well-known: we have the commutative/monoidal theories.
Commutative/monoidal theories include the $\Box,
\wedge, \top$-fragment of the system $K$. We shall show
in this paper that filtering unification is quite
common indeed in {\it full} modal systems. First of
all, we prove that (under mild hypotheses) filtering
unification is characterized by the fact that finitely
presented {\it projective} algebras are closed under
binary products. Secondly, we apply this
characterization to the case of normal extensions $E$
of the modal system $K4$ and show that {\it $E$ has
filtering unification iff it extends $K4.2^+$}. There
are extensions of $K4.2^+$ for which most general
unifiers do not exist, however they exist for systems
like $K4.2^+$, $S4.2$, $GL.2^+$, $Grz.2$, etc.",
}
@InProceedings{DershowitzKirchner:UNIF:2002,
author = "Nachum Dershowitz and Claude Kirchner",
title = "Abstract Canonical Inference Systems",
type = "Invited talk",
crossref = "UNIF:2002",
pages = "31",
abstract = "We provide a general proof theoretical setting under
which the so-called ``completion processes'' (as used
for equational reasoning) can be modeled, understood,
studied, proved and generalized. This framework---based
on a well-founded ordering on proofs---allows us to
derive saturation processes and redundancy criteria
abstractly.",
}
@InProceedings{Ranise:UNIF:2002,
author = "Silvio Ranise",
title = "A Superposition Decision Procedure for the Logic of
Equality with Interpreted and Uninterpreted Functions",
crossref = "UNIF:2002",
pages = "32--36",
abstract = "The logic of equality with uninterpreted functions has
been proved useful in many verification efforts, such
as the verification of pipelined microprocessors.
Uninterpreted functions allow one to abstract from
unimportant details and make the proof obligations
amenable to ``push-button'' verification methods.
However, it is often the case that some functions must
be (partially) interpreted to successfully check some
property. In this abstract, we present a decision
procedure for the logic of uninterpreted functions
which is capable of reasoning modulo a background
theory which (partially) interprets a subset of the
functions. We combine well-known techniques of the
rewriting approach to automatic theorem proving, such
as saturation and clausal normal form transformation.
We also report on preliminary experimental results.",
}
@InProceedings{BachmairScharff:UNIF:2002,
author = "Leo Bachmair and Christelle Scharff",
title = "Direct Combination of Completion and Congruence
Closure",
crossref = "UNIF:2002",
pages = "37--39",
abstract = "Theories presented by finite sets of ground equations
are known to be decidable. There are different
approaches to dealing with ground equational theories.
Researchers interested in term rewriting have applied
completion, a method that transforms a given set of
equations into a set of directed rules that is both
terminating and confluent. A rather distinct approach
to solving word problems for ground equational theories
is based on the computation of the so-called congruence
closure of a relation. In this work we bridge the gap
between the two approaches and provide a better
understanding of the connection between completion and
congruence closure by defining a new, efficient
congruence closure method that integrates key ideas of
completion and graph-based methods of congruence
closure in a novel, direct and natural way.",
}
@InProceedings{MarinMiddeldorp:UNIF:2002,
author = "Mircea Marin and Aart Middeldorp",
title = "New Completeness Results for Lazy Conditional
Narrowing",
crossref = "UNIF:2002",
pages = "40--44",
abstract = "We show the completeness of a lazy conditional
narrowing calculus (LCNC) with leftmost selection for
the class of confluent deterministic conditional
rewrite systems (CTRSs). Rewrite rules in deterministic
CTRSs may have extra variables in right-hand sides and
conditions. From the completeness proof we obtain
several insights to make the calculus more
deterministic. Furthermore, and similar to the
refinements developed for the unconditional case, we
succeeded in removing almost all non-determinism due to
the choice of the inference rule of LCNC by imposing
further syntactic conditions on the participating CTRSs
and restricting the set of solutions for which
completeness needs to be established.",
}
@InProceedings{KorovinVoronkov:UNIF:2002,
author = "Konstantin Korovin and Andrei Voronkov",
title = "The Decidability of the First-Order Theory of the
{K}nuth-{B}endix Orders in the Case of Unary
Signatures",
crossref = "UNIF:2002",
pages = "45--46",
}
@InProceedings{Lynch:UNIF:2002,
author = "Christopher Lynch",
title = "Decidability and Complexity of ${E}$-Unification for
some classes of Equational Theories",
type = "Invited talk",
crossref = "UNIF:2002",
pages = "47--51",
abstract = "$E$-unification is, in general, an undecidable
problem. We discuss some classes of theories that have
been shown to be decidable, and whose complexity has
been analyzed. These classes have either been defined
by a syntactic criteria, or by closure under some
inference rule. We consider completion-based inference
systems and goal-directed inference systems, but we
concentrate on goal-directed inference systems. We
discuss some methods of showing that an $E$-unification
procedure terminates, and show how those methods are
used to prove some classes to be decidable and to analyze
their complexity.",
}
@InProceedings{AnantharamanNarendranRusinowitch:UNIF:2002,
author = "Siva Anantharaman and Paliath Narendran and
Micha{\"e}l Rusinowitch",
title = "{ACID}-Unification, Rewrite Reachability and Set
Constraints",
crossref = "UNIF:2002",
pages = "52--53",
abstract = "In this paper we study the $E$-unification problem
w.r.t. theories extending $ACI$ or $ACUI$. Particular
cases of interest are $ACID$ and $ACUID$, where $ACI$
(resp. $ACUI$) is enhanced with the distributivity laws
for a given `$*$' over an $ACI$ (resp. $ACUI$) symbol
`$+$'; these theories are useful in program
specification based on set constraints. We first look
at the unification problem w.r.t. a theory $ACUIH^C$
(`$ACUI$ plus a set of commuting homomorphisms')
situated between $ACUI$ and $ACUID$, and show the
problem to be undecidable; the proof is via reduction
from the reachability problem for Minsky machine
configurations. From this we deduce that $AC(U)ID$
unification is also undecidable, if equations of
associativity-commutativity or just of associativity on
`$*$', are added on to $AC(U)ID$. If no further law on
`$*$' is assumed other than distributivity over `$+$',
then $ACID$-unification reduces to solving a set
constraints problem with {\em finite, non-empty} sets.
We present an approach based on labeled dag automata to
show that it is NEXPTIME-decidable. A DEXPTIME lower
bound is obtained by other considerations. These
conclusions also hold for $ACUID$-unification.",
}
@InProceedings{BaaderTinelli:UNIF:2002,
author = "Franz Baader and Cesare Tinelli",
title = "{ACU}-Unification with a Successor Symbol",
crossref = "UNIF:2002",
pages = "54--58",
abstract = "Unification in the presence of an
associative-commutative binary function symbol $+$ with
a unit element $0$ (ACU-unification) is
well-investigated. This note is concerned with what
happens if the signature is extended by a successor
symbol $s$ that satisfies the usual equation w.r.t.
$+$, i.e., $x + s(y) = s(x + y)$. We show that
unification in the theory obtained this way is
basically the same as ACU-unification with an
additional free constant symbol.",
}
@InProceedings{Nguyen:UNIF:2002,
author = "Quang Huy Nguyen",
title = "A Constructive Decision Procedure for Equalities
Modulo {AC}",
crossref = "UNIF:2002",
pages = "59--63",
abstract = "We provide in this paper an optimised constructive
decision procedure for AC equalities based on the
\emph{syntacticness} of AC theories. The main idea is
to reduce the search space by considering only terms in
associative canonical form. This decision procedure has
been used in the context of an ELAN based tactic for
rewriting modulo AC in Coq where the proofs of AC
equalities are efficiently searched by ELAN and checked
in Coq.",
}
@InProceedings{Cervesato:UNIF:2002,
author = "Iliano Cervesato",
title = "Solution Count for Multiset Unification with Trailing
Multiset Variables",
crossref = "UNIF:2002",
pages = "64--68",
abstract = "We consider a subproblem of AC1 unification applied to
multisets, where the expressions being unified mention
at most one multiset variable. This extends the
unification problem of lists in Prolog-like languages
with commutativity. We give an upper bound to the
number of solutions and outline an algorithm for their
generation.",
}
@Proceedings{UNIF:2002,
editor = "Christophe Ringeissen and Cesare Tinelli and Ralf
Treinen and Rakesh M. Verma",
title = "16th International Workshop on Unification",
booktitle = "16th International Workshop on Unification",
key = "UNIF",
pages = "68",
year = "2002",
month = jul # " 25--26",
venue = "Copenhagen, Denmark",
series = "Technical Report",
volume = "02-05",
institution = "Department of Computer Science",
address = "University of Iowa, USA",
}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% VERIFY - Verification Workshop
% =====================
@InProceedings{Massacci:FLoC:2002:*VERIFY+FCS,
author = "Fabio Massacci",
title = "Formal Verification of {SET} by {V}isa and
{M}astercard: Lessons for Formal Methods in Security",
type = "Invited talk",
crossref = "VERIFY:2002",
pages = "1--4",
abstract = "The Secure Electronic Transaction (SET) protocol has
been proposed by a consortium of credit card companies
and software corporations to secure e-commerce
transactions. When the customer makes a purchase, the
SET dual signature guarantees authenticity while
keeping the customer's account details secret from the
merchant and his choice of goods secret from the bank.
\\
SET verification has always been a holy grail for
security verification and many papers do conclude with
{"}and this technique can be applied to SET{"} and yet
the forthcoming application is not so forthcoming\ldots
\\
In this talk, I report the results of the verification
efforts on the SET protocol, a joint work with G. Bella
and L. Paulson from the University of Cambridge. In a
nutshell, we proved that the protocol is reasonably
secure. By using Isabelle and the inductive method we
showed that the credit card details do remain
confidential and customer, merchant and bank can
confirm most details of a transaction even when some of
those details are kept from them. \\
And now, the question comes: you verified SET, so what?
\\
What can we learn from this verification effort? Are
there lessons for security design? Which security
designs are easier to verify? What kind of techniques
and tricks are necessary? What do we need to scale so
that security verification can become an easier task? I
will give a personal perspective on the problem.",
}
@InProceedings{Kroening:VERIFY:2002,
author = "Daniel Kr{\"o}ning",
title = "Application Specific Higher Order Logic Theorem
Proving",
crossref = "VERIFY:2002",
pages = "5--15",
abstract = "Theorem proving allows the formal verification of the
correctness of very large systems. In order to increase
the acceptance of theorem proving systems during the
design process, we implemented higher order logic proof
systems for ANSI-C and Verilog within a framework for
application specific proof systems. Furthermore, we
implement the language of the PVS theorem prover as
well-established higher order specification language.
The tool allows the verification of the design
languages using a PVS specification and the
verification of hardware designs using a C program as
specification. We implement powerful decision
procedures using Model Checkers and satisfiability
checkers. We provide experimental results that compare
the performance of our tool with PVS on large
industrial scale hardware examples.",
}
@InProceedings{Vanackere:VERIFY:2002,
author = "Vincent Vanack{\`e}re",
title = "The {TRUST} protocol analyser, Automatic and efficient
verification of cryptographic protocols",
crossref = "VERIFY:2002",
pages = "17--27",
abstract = "The paper presents TRUST, a verifier for cryptographic
protocols. In our framework, a protocol is modeled as a
finite number of processes interacting with a hostile
environment; the security properties expected from the
protocol are specified by inserting logical assertions
on the environment knowledge in the process.\\
Our analyser relies on an exact symbolic reduction
method combined with several techniques aiming to
alleviate the number of interleavings that have to be
considered. We argue that our verifier is able to
perform a full analysis on up to 3 parallel
(interleaved) sessions of most protocols. Moreover,
authentication and secrecy properties are specified in
a very natural way, and whenever an error is found an
attack against the protocol is given by our tool.",
}
@InProceedings{BenzmuellerGirominiNonnengartZimmer:VERIFY:2002,
author = "Christoph Benzm{\"u}ller and Corrado Giromini and
Andreas Nonnengart and J{\"u}rgen Zimmer",
title = "Reasoning Services in {MathWeb-SB} for symbolic
verification of Hybrid Systems",
crossref = "VERIFY:2002",
pages = "29--39",
abstract = "We propose to apply mathematical service systems
developed in the context of the Calculemus initiative
to support the verification of hybrid systems. For this
we want to identify and analyse different kinds of
mathematical subtasks occurring in industrial-strength
examples of hybrid system verification. These kinds of
subtasks, suitable for being tackled with reasoning
specialists, can be modeled as mathematical service
requests to a network of service systems like the
Mathweb Software Bus. For the deductive model checking
approach we have identified the following candidates of
mathematical service requests: The solution of
differential equations, subsumption of sets of
constraints, and their solution. A further candidate
can be the elimination of second",
}
@InProceedings{AppelMichaelStumpVirga:FLoC:2002:FCS+*VERIFY,
author = "Andrew W. Appel and Neophytos G. Michael and Aaron
Stump and Roberto Virga",
title = "A Trustworthy Proof Checker",
crossref = "VERIFY:2002",
pages = "41--52",
abstract = "Proof-Carrying Code (PCC) and other applications in
computer security require machine-checkable proofs of
properties of machine-language programs. The main
advantage of the PCC approach is that the amount of
code that must be explicitly trusted is very small: it
consists of the logic in which predicates and proofs
are expressed, the safety predicate, and the proof
checker. We have built a minimal-TCB checker, and we
explain its design principles, and the representation
issues of the logic, safety predicate, and safety
proofs. We show that the trusted code in such a system
can indeed be very small. In our current system the TCB
is less than 2,700 lines of code (an order of magnitude
smaller even than other PCC systems) which adds to our
confidence of its correctness.",
}
@InProceedings{Cohen:FLoC:2002:FCS+*VERIFY,
author = "Ernie Cohen",
title = "Proving Protocols Safe from Guessing",
crossref = "VERIFY:2002",
pages = "53--60",
abstract = "We describe how to prove cryptographic protocols
secure against a Dolev-Yao attacker that can also
engage in idealized offline guessing attacks. Our
method is based on constructing a first-order invariant
that bounds, in every reachable state, both the
information available to an attacker and the steps
of guessing attacks starting from this information. We
have implemented the method as an extension to the
protocol verifier TAPS, making it the first mechanical
verifier to prove protocols secure against guessing
attacks in an unbounded model.",
}
@InProceedings{ArmandoCompagna:FLoC:2002:FCS+*VERIFY,
author = "Alessandro Armando and Luca Compagna",
title = "Automatic {SAT}-Compilation of Security Problems",
crossref = "VERIFY:2002",
pages = "61--69",
abstract = "We provide a fully automatic translation from security
protocol specifications into propositional logic which
can be effectively used to find attacks to protocols.
Our approach results from the combination of a
reduction of security problems to planning problems and
well-known SAT-reduction techniques developed for
planning. We also propose and discuss a set of
transformations on security problems whose application
has a dramatic effect on the size of the propositional
encoding obtained with our SAT-compilation technique.
We describe a model-checker for security protocols
based on our ideas and show that attacks to a set of
well-known authentication protocols are found in few
seconds by state-of-the-art SAT solvers.",
}
@InProceedings{Meadows:FLoC:2002:FCS+*VERIFY,
author = "Catherine Meadows",
title = "Identifying Potential Type Confusion in Authenticated
Messages",
crossref = "VERIFY:2002",
pages = "71--80",
abstract = "A type confusion attack is one in which a principal
accepts data of one type as data of another. Although
it has been shown by Heather et al. that there are
simple formatting conventions that will guarantee that
protocols are free from simple type confusions in which
fields of one type are substituted for fields of
another, it is not clear how well they defend against
more complex attacks, or against attacks arising from
interaction with protocols that are formatted according
to different conventions. In this paper we show how
type confusion attacks can arise in realistic
situations even when the types are explicitly defined
in at least some of the messages, using examples from
our recent analysis of the Group Domain of
Interpretation Protocol. We then develop a formal model
of types that can capture potential ambiguity of type
notation, and outline a procedure for determining
whether or not the types of two messages can be
confused. We also discuss some open issues.",
}
@InProceedings{SteelBundyDenney:FLoC:2002:FCS+*VERIFY,
author = "Graham Steel and Alan Bundy and Ewen Denney",
title = "Finding Counterexamples to Inductive Conjectures and
Discovering Security Protocol Attacks",
crossref = "VERIFY:2002",
pages = "81--90",
abstract = "We present an implementation of a method for finding
counterexamples to universally quantified conjectures
in first-order logic. Our method uses the proof by
consistency strategy to guide a search for a
counterexample and a standard first-order theorem
prover to perform a concurrent check for inconsistency.
We explain briefly the theory behind the method,
describe our implementation, and evaluate results
achieved on a variety of incorrect conjectures from
various sources. Some work in progress is also
presented: we are applying the method to the
verification of cryptographic security protocols. In
this context, a counterexample to a security property
can indicate an attack on the protocol, and our method
extracts the trace of messages exchanged in order to
effect this attack. This application demonstrates the
advantages of the method, in that quite complex side
conditions decide whether a particular sequence of
messages is possible. Using a theorem prover provides a
natural way of dealing with this. Some early results
are presented and we discuss future work. Keywords:
Counterexamples, Security Protocols, Non-theorems,
Proof by Consistency.",
}
@InProceedings{HerzogGuttman:VERIFY:2002,
author = "Amy L. Herzog and Joshua Guttman",
title = "Eager Formal Methods for Security Management",
crossref = "VERIFY:2002",
pages = "91--101",
abstract = "Controlling complexity is a core problem in
information security. Achieving a security goal in a
networked system requires the cooperation of many
devices, such as routers, firewalls, virtual private
network gateways, and individual host operating
systems. Different devices may require different
configurations, depending on their purposes and network
locations. Many information security problems may be
solved given models of these devices and their
interactions. We have focused for several years on
these problems, using \emph{eager formal methods} as
our approach.\\
Eager formal methods front-loads the contribution of
formal methods to problem-solving. The focus is on
modeling devices, their behavior as a function of
configurations, and the consequences of their
interactions. A class of practically important security
goals must also be expressible in terms of these
models.\\
In eager formal methods, the models suggest algorithms
taking as input information about system configuration,
and returning the security goals satisfied in that
system. In some cases, we can also derive algorithms to
generate configurations to satisfy given security
goals. The formal models provide a rigorous
justification of soundness. By contrast, algorithms are
implemented as ordinary computer programs requiring no
logical expertise to use. Resolving practical problems
then requires little time, and no formal methods
specialists.\\
We have applied this approach to several problems. In
this extended abstract, we briefly describe two
problems and the modeling frameworks that lead to
solutions. The first is the distributed packet
filtering problem, in which filtering routers or
firewalls are located at various points in a network
with complex topology. The problem is to constrain the
flow of different types of packets through the network.
The second problem concerns configuring gateways for
the IP security protocols (IPsec); the problem is to
ensure that authentication and confidentiality goals
are achieved for specific types of packets traversing
particular paths through the network. Solutions to
these problems have been published and implemented. We
also describe how to unify the two solutions, so that
packet filtering goals and IPsec authentication and
confidentiality are jointly enforced on a network.",
}
@InProceedings{ArmandoBonacinaSehgalRaniseRusinowitch:VERIFY:2002,
author = "Alessandro Armando and Maria Paola Bonacina and Silvio
Ranise and Micha{\"e}l Rusinowitch and Aditya Kumar
Sehgal",
title = "High-performance deduction for verification: a case
study in the theory of arrays",
crossref = "VERIFY:2002",
pages = "103--112",
abstract = "We outline an approach to use ordering-based
theorem-proving strategies as satisfiability procedures
for certain decidable theories. We report on
experiments with synthetic benchmarks in the theories
of arrays with extensionality, showing that a theorem
prover - the E system - compares favorably with the
state-of-the-art validity checker CVC.",
}
@InProceedings{BeckertKellerSchmidt:VERIFY:2002,
author = "Bernhard Beckert and Uwe Keller and Peter H. Schmitt",
title = "Translating the Object Constraint Language into
First-order Predicate Logic",
crossref = "VERIFY:2002",
pages = "113--123",
abstract = "In this paper we define a translation of UML class
diagrams with OCL constraints into first-order
predicate logic. The goal is logical reasoning about
UML models, realized by an interactive theorem prover.
We put an emphasis on usability of the formulas
resulting from the translation, and we have developed
optimisations and heuristics to enhance the efficiency
of the theorem proving process.\\
The translation has been implemented as part of the KeY
system, but our implementation can also be used
stand-alone.",
}
@Proceedings{VERIFY:2002,
editor = "Serge Autexier and Heiko Mantel",
title = "Verification Workshop",
booktitle = "Verification Workshop",
key = "VERIFY",
pages = "viii+125",
year = "2002",
month = jul # " 25--26",
venue = "Copenhagen, Denmark",
series = "DIKU technical reports",
volume = "02-07",
institution = "University of Copenhagen, Dept.~of Computer Science",
ISSN = "0107-8283",
}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WLPE - Workshop on Logic Programming Environments
% ==========================================
@InProceedings{AgrenSzerediBeldiceanuCarlsson:WLPE:2002,
author = "Magnus {\AA}gren and Tam{\'a}s Szeredi and Nicolas
Beldiceanu and Mats Carlsson",
title = "Tracing and Explaining Execution of {CLP(FD)}
Programs",
crossref = "WLPE:2002",
pages = "1--16",
abstract = "Previous work in the area of tracing CLP(FD) programs
mainly focuses on providing information about control
of execution and domain modification. In this paper, we
present a trace structure that provides information
about additional important aspects. We incorporate
explanations in the trace structure, i.e. reasons for
why certain solver actions occur. Furthermore, we come
up with a format for describing the execution of the
filtering algorithms of global constraints. Some new
ideas about the design of the trace are also presented.
For example, we have modeled our trace as a nested
block structure in order to achieve a hierarchical
view. Also, new ways about how to represent and
identify different entities such as constraints and
domain variables are presented.",
}
@InProceedings{Lesaint:WLPE:2002,
author = "Willy Lesaint",
title = "Value withdrawal explanations: a theoretical tool for
programming environments",
crossref = "WLPE:2002",
pages = "17--30",
abstract = "Constraint logic programming combines declarativity
and efficiency thanks to constraint solvers implemented
for specific domains. Value withdrawal explanations
have been efficiently used in several constraints
programming environments but there does not exist any
formalization of them. This paper is an attempt to fill
this lack. Furthermore, we hope that this theoretical
tool could help to validate some programming
environments. A value withdrawal explanation is a tree
describing the withdrawal of a value during a domain
reduction by local consistency notions and labeling.
Domain reduction is formalized by a search tree using
two kinds of operators: operators for local consistency
notions and operators for labeling. These operators are
defined by sets of rules. Proof trees are built with
respect to these rules. For each removed value, there
exists such a proof tree which is the withdrawal
explanation of this value.",
}
@InProceedings{OuisJussienBoizumault:WLPE:2002,
author = "Samir Ouis and Narendra Jussien and Patrice
Boizumault",
title = "{COINS}: a constraint-based interactive solving
system",
crossref = "WLPE:2002",
pages = "31--46",
abstract = "This paper describes the COINS (COnstraint-based
INteractive Solving) system: a conflict-based
constraint solver. COINS is an improved constraint
solver that helps understanding inconsistencies,
simulates constraint additions and/or retractions
(without any propagation), determines if a given
constraint belongs to a conflict and provides diagnosis
tools (\emph{e.g.} why variable $v$ cannot take value
$val$). COINS provides user-friendly representation of
conflicts and explanations.",
}
@InProceedings{Fages:WLPE:2002,
author = "Fran{\c c}ois Fages",
title = "{CLPGUI}: a generic graphical user interface for
constraint logic programming over finite domains",
crossref = "WLPE:2002",
pages = "47--62",
abstract = "CLPGUI is a graphical user interface for visualizing
and interacting with constraint logic programs over
finite domains. In CLPGUI, the user can control the
execution of a CLP program through several views of
constraints, of finite domain variables and of the
search tree. CLPGUI is intended to be used both for
teaching purposes, and for debugging and improving
complex programs on real size data. It is based on a
client-server architecture for connecting the CLP
process to a Java-based GUI process. Communication by
message passing provides an open architecture which
facilitates the reuse of graphical components and the
porting to different constraint programming systems.
Constraints and goals can be posted incrementally from
the GUI. The level of granularity of the search tree is
specified by annotations in the CLP program. The
visualized search tree is a subtree of the SLD
derivation tree with constraints. We propose several
dynamic 2D and 3D visualizations of the search tree and
of the evolution of finite domain variables. We
describe the current implementation of the annotations
and of the interactive execution model in GNU-Prolog,
and report some evaluation results.",
}
@InProceedings{VaucheretBueno:WLPE:2002,
author = "Claudio Vaucheret and Francisco Bueno",
title = "More Precise Yet Efficient Type Inference for Logic
Programs",
crossref = "WLPE:2002",
pages = "63--76",
abstract = "Type analyses of logic programs which aim at inferring
the types of the program being analyzed are presented
in a unified abstract interpretation-based framework.
This covers most classical abstract
interpretation-based type analyzers for logic programs,
built on either top-down or bottom-up interpretation of
the program. In this setting, we discuss the widening
operator, arguably a crucial one. We present a new
widening which is more precise than those previously
proposed. Practical results with our analysis domain
are also presented, showing that it also allows for
efficient analysis.",
}
@InProceedings{NeumerkelKral:WLPE:2002,
author = "Ulrich Neumerkel and Stefan Kral",
title = "Declarative program development in {P}rolog with
{GUPU}",
crossref = "WLPE:2002",
pages = "77--86",
abstract = "We present the side-effect free programming course
environment GUPU that seamlessly guides and supports
students during all phases of program development
covering specification, implementation, and program
debugging. GUPU features several innovations in this
area. The specification phase is supported by reference
implementations augmented with diagnostic facilities.
During implementation immediate feedback from test
cases and visualization tools helps the programmer's
program understanding. A set of slicing techniques is
used to narrow down programming errors. Finally, the
whole process is guided by a marking system.",
}
@InProceedings{GuptaGopal:WLPE:2002,
author = "Gopal Gupta and Deepa Gopal",
title = "Towards a Logic Programming Based Environment for
Automatic Generation of Translators",
crossref = "WLPE:2002",
pages = "87--88",
abstract = "We report on the development of a logic programming
based environment for automatic generation of
translators for translating one formal notation (a
programming language, a mark-up language, etc.) to
another. Automatic generation of translators will make
the task of {\it porting} and {\it filtering} much
faster, thereby improving programmer productivity.
Porting (migrating a program from one machine or system
to another) and filtering (translating a program
written in one particular notation to accomplish a
particular task to another notation that accomplishes
the same task) are important problems that arise
commonly in business computing, since the underlying
software, operating system, and hardware change
frequently and require the user programs to be modified
accordingly. With advent of eXtensible Markup Languages
(XML) for WWW, porting/filtering have acquired greater
significance as rapidly translating one XML to another
XML and vice versa is very important for document
interoperability and electronic commerce. Using our
graphical tool a programmer will be able to {\it
rapidly} specify translators by pictorially specifying
the parse tree patterns and the semantic mappings
between the source and target notations. Our tool will
automatically generate the translator code in Prolog
from this specification. Automatic generation implies
that the generated translator will contain fewer errors
compared to a manually written one.",
}
@InProceedings{Angelopoulos:WLPE:2002,
author = "Nicos Angelopoulos",
title = "Exporting {P}rolog source code",
crossref = "WLPE:2002",
pages = "89--96",
abstract = "In this paper we present a simple source code
configuration tool. ExLibris operates on libraries and
can be used to extract from local libraries all code
relevant to a particular project. Our approach is not
designed to address problems arising in code production
lines, but rather, to support the needs of individual
or small teams of researchers who wish to communicate
their Prolog programs. In the process, we also wish to
accommodate and encourage the writing of reusable code.
Moreover, we support and propose ways of dealing with
issues arising in the development of code that can be
run on a variety of \emph{like-minded} Prolog systems.
With consideration to these aims we have made the
following decisions: (i) support file-based source
development, (ii) require minimal program
transformation, (iii) target simplicity of usage, and
(iv) introduce minimum number of new primitives.",
}
@InProceedings{WielemakerAnjewierden:WLPE:2002,
author = "Jan Wielemaker and Anjo Anjewierden",
title = "An Architecture for Making Object-Oriented Systems
Available from {P}rolog",
crossref = "WLPE:2002",
pages = "97--110",
abstract = "It is next to impossible to develop real-life
applications in just pure Prolog. With XPCE we realised
a mechanism for integrating Prolog with an external
object-oriented system that turns this OO system into a
natural extension to Prolog. We describe the design and
how it can be applied to other external OO systems.",
}
@InProceedings{FalkmanTorgersson:WLPE:2002,
author = "G{\"o}ran Falkman and Olof Torgersson",
title = "Enhancing Usefulness of Declarative Programming
Frameworks through Complete Integration",
crossref = "WLPE:2002",
pages = "111--122",
abstract = "If declarative programming languages are to compete
with their imperative and object-oriented counterparts
as general purpose programming tools, building complete
programming environments, including a sufficient set of
standard libraries and project management tools, is
necessary. An alternative approach is to make use of
all the development years put into legacy programming
tools, and to combine these with declarative
programming. A problem with this integration of
different paradigms and tools is that connecting the
different parts of a system often is rather
complicated, lessening the chance that declarative
languages really become used in real-world interactive
applications.\\
The Gisela framework for declarative programming was
developed with the specific aim of providing a tool
that would be useful for knowledge representation and
reasoning within state-of-the-art desktop and web
applications. As such, it provides two complete
application programming interfaces ({\sc api}):
Programming using objects and programming using a
traditional equational declarative style. In addition
to providing complete integration into an
object-oriented development environment, Gisela also
allows extensions and modifications, due to the general
computation model and well-defined {\sc api}s. The
framework has been used to develop real-world
applications, which are in daily use. We present the
declarative model underlying Gisela, the methodology
proposed for building applications and give a brief
overview of some example applications.",
}
@Proceedings{WLPE:2002,
editor = "Alexandre Tessier",
title = "Workshop on Logic Programming Environments",
booktitle = "Workshop on Logic Programming Environments",
conference = "12th International Workshop",
key = "WLPE",
pages = "vi+123",
year = "2002",
month = jul # " 31",
venue = "Copenhagen, Denmark",
publisher = "Computing Research Repository (CoRR)",
address = "http://www.acm.org/repository",
}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WRS - Workshop on Reduction Strategies in Rewriting and Programming
% =============================================================
@InProceedings{Middeldorp:WRS:2002,
author = "Aart Middeldorp",
title = "Approximations for Strategies and Termination",
type = "Invited talk",
crossref = "WRS:2002",
pages = "1--2",
abstract = "In the talk we illustrate the use of approximations
and tree automata techniques to define optimal
normalizing strategies for large classes of first-order
rewrite systems. We further show how the very same
ideas can be used to improve the dependency pair method
for proving termination of rewrite systems
automatically. If time permits, we present a new and
computationally less expensive improvement for the
latter.",
}
@InProceedings{RetyVuotto:WRS:2002,
author = "Pierre R{\'e}ty and Julie Vuotto",
title = "Regular Sets of Descendants by Leftmost Strategy",
crossref = "WRS:2002",
pages = "3--20",
abstract = "For a constructor-based rewrite system R, a regular
set of ground terms E, and assuming some additional
restrictions, we build a finite tree automaton that
recognizes the descendants of E, i.e. the terms issued
from E by rewriting, according to leftmost strategy.",
}
@InProceedings{BrandKlintVinju:WRS:2002,
author = "Mark van den Brand and Paul Klint and Jurgen Vinju",
title = "Term Rewriting with Type-safe Traversal Functions",
crossref = "WRS:2002",
pages = "21--37",
abstract = "Term rewriting is an appealing technique for
performing program analysis and program transformation.
Tree (term) traversal is frequently used but is not
supported by standard term rewriting. In this paper,
many-sorted first-order term rewriting is extended with
automatic tree traversal by adding two primitive tree
traversal strategies and complementing them with three
types of traversals. These so-called traversal
functions can be either top-down or bottom-up. They can
be sort preserving, mapping to a single sort, or a
combination of these two. Traversal functions have a
simple design, their application is type-safe in a
first-order many-sorted setting and can be implemented
efficiently. We describe the operational semantics of
traversal functions and discuss applications.",
}
@InProceedings{Laemmel:WRS:2002,
author = "Ralf L{\"a}mmel",
title = "The Sketch of a Polymorphic Symphony",
crossref = "WRS:2002",
pages = "39--58",
abstract = "Functional strategies were previously defined as
first-class generic functions which can traverse into
terms while mixing uniform and type-specific behaviour.
The first-class status is witnessed by a combinator
style of generic programming. This symphony
reconstructs functional strategies as an amalgamation
of certain bits of parametric polymorphism, type case,
polytypism, and overloading. We illustrate the
expressiveness and conciseness of this reconstruction by
providing highly parameterized definitions of traversal
schemes. The resulting style of generic programming is
extremely lightweight and easy to use because it only
involves two special combinators not yet present in
standard functional programming. The reconstruction is
geared towards Haskell, and it is supported by a
generative tool YAGH---Yet Another Generic Haskell.",
}
@InProceedings{OlmosVisser:WRS:2002,
author = "Karina Olmos and Eelco Visser",
title = "Strategies for Source-to-Source Constant Propagation",
crossref = "WRS:2002",
pages = "59--77",
abstract = "Data-flow optimizations are usually implemented on
low-level intermediate representations. This is not
appropriate for source-to-source optimizations, which
reconstruct a source level program after
transformation. In this paper we show how constant
propagation, a well known data-flow optimization
problem, can be implemented on abstract syntax trees in
Stratego, a rewriting system extended with programmable
rewriting strategies for the control over the
application of rules and dynamic rewrite rules for the
propagation of information.",
}
@InProceedings{Oostrom:FLoC:2002:HOR+*WRS,
author = "Vincent van Oostrom",
title = "Optimal Strategies in Higher-Order Rewriting",
type = "Invited talk",
crossref = "WRS:2002",
pages = "79",
}
@InProceedings{GlauertKhasidashvili:WRS:2002,
author = "John Glauert and Zurab Khasidashvili",
title = "An Abstract {B}{\"o}hm-normalization",
crossref = "WRS:2002",
pages = "81--96",
abstract = "In this paper, we study normalization by neededness
with respect to 'infinite results', such as
B{\"o}hm-trees, in an abstract framework of Stable
Deterministic Residual Structures (SDRS). We formalize
the concept of 'infinite results' as suitable sets of
infinite reductions, and prove an abstract infinitary
normalization theorem with respect to such sets. We
also give a sufficient and necessary condition for
existence of minimal normalizing reductions.",
}
@InProceedings{AlbertEtAl:WRS:2002,
author = "Elvira Albert and Michael Hanus and Frank Huch and
Javier Oliver and Germ{\'a}n Vidal",
title = "Operational Semantics for Lazy Functional Logic
Programs",
crossref = "WRS:2002",
pages = "97--112",
abstract = "In this paper we define an operational semantics for
lazy functional logic programs including notions like
sharing, concurrency, non-determinism, etc. Such a
semantic description is not only important to provide
appropriate language definitions to reason about
programs and check the correctness of implementations
but it is also a basis to develop language-specific
tools, like program tracers, profilers, optimizers,
etc. First, we define a ``big-step'' semantics in
natural style to relate expressions and their evaluated
results. Since this semantics is not sufficient to
cover concurrency, search strategies, or to reason
about costs associated to particular computations, we
also define a ``small-step'' operational semantics
covering the features of modern functional logic
languages. Finally, we provide the correctness of the
small-step operational semantics.",
}
@InProceedings{HarrisonKieburtz:WRS:2002,
author = "William L. Harrison and Richard B. Kieburtz",
title = "Pattern-driven Reduction in {H}askell",
crossref = "WRS:2002",
pages = "113--125",
abstract = "Haskell is a functional programming language with
nominally non-strict semantics, implying that
evaluation of a Haskell expression proceeds by
demand-driven reduction. However, Haskell also provides
pattern matching on arguments of functions, in let
expressions and in the match clauses of case
                  expressions. Pattern matching requires data-driven
reduction to the extent necessary to evaluate a pattern
match or to bind variables introduced in a pattern. In
this paper, we provide both an abstract semantics and a
logical characterization of pattern-matching in Haskell
and the reduction order that it entails.",
}
@InProceedings{AyalaEtAl:WRS:2002,
author = "Mauricio Ayala-Rinc{\'o}n and Rinaldi Maya Neto and
Ricardo P. Jacobi and Carlos Llanos and Reiner
Hartenstein",
title = "Applying {ELAN} Strategies in Simulation Processors
over Simple Architectures",
crossref = "WRS:2002",
pages = "127--141",
abstract = "Simulation of processors over simple architectures is
an important technique for verifying previously to the
expensive hardware implementation techniques involved
in new technologies. Arvind's group has illustrated how
                  to describe processors by rewriting and introduced a
technique for proving the correctness of elaborated
processors with respect to basic ones. The correctness
of a processor is proved by showing that its related
rewriting system does all that other rewriting system,
considered correct, does. Basic concepts of rewriting
as noetherianity and confluence are relevant for
                  showing adequacy of these rewrite-based processor
descriptions. In Arvind group's approach, simulation of
                  the described processors is not done directly over the rewriting
systems but over standard hardware description
languages like Verilog after translating these rewrite
                  descriptions adequately. Here we show how rewriting
logic may be applied for purely rewriting simulation of
processors and other proposals as is the case of
evaluating performance of important hardware aspects of
processors. Environments like ELAN, that is the one we
use, are sufficiently versatile to allow for adequate
implementations and easy modifications that are
intrinsically related with hardware properties like the
size and control of reorder buffers and the method of
                  predictions used by speculative processors.",
}
@Proceedings{WRS:2002,
editor = "Bernhard Gramlich and Salvador Lucas",
title = "Workshop on Reduction Strategies in Rewriting and
Programming",
booktitle = "Workshop on Reduction Strategies in Rewriting and
Programming",
conference = "2nd International Workshop",
key = "WRS",
pages = "vi+143",
year = "2002",
month = jul # " 21",
venue = "Copenhagen, Denmark",
publisher = "Verlag Berger",
address = "Horn, Austria",
ISBN = "3-85028-352-6",
}