<?xml version="1.0" encoding="UTF-8"?>
<TEI xml:space="preserve" xmlns="http://www.tei-c.org/ns/1.0" 
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" 
xsi:schemaLocation="http://www.tei-c.org/ns/1.0 https://raw.githubusercontent.com/kermitt2/grobid/master/grobid-home/schemas/xsd/Grobid.xsd"
 xmlns:xlink="http://www.w3.org/1999/xlink">
	<teiHeader xml:lang="en">
		<fileDesc>
			<titleStmt>
				<title level="a" type="main">The 6th International Workshop on the Implementation of Logics</title>
			</titleStmt>
			<publicationStmt>
				<publisher/>
				<availability status="unknown"><licence/></availability>
				<date type="published" when="2006-11-03">November 3, 2006</date>
			</publicationStmt>
			<sourceDesc>
				<biblStruct>
					<analytic>
						<author>
							<persName><forename type="first">Christoph</forename><surname>Benzmüller</surname></persName>
							<affiliation key="aff0">
								<orgName type="institution">Universität des Saarlandes</orgName>
								<address>
									<country key="DE">Germany</country>
								</address>
							</affiliation>
							<affiliation key="aff17">
								<orgName type="department">FR Informatik</orgName>
								<orgName type="institution">Universität des Saarlandes</orgName>
								<address>
									<settlement>Saarbrücken</settlement>
									<country key="DE">Germany</country>
								</address>
							</affiliation>
							<affiliation key="aff20">
								<orgName type="department">Institut für Informatik</orgName>
								<orgName type="institution">Technische Universität München</orgName>
								<address>
									<addrLine>Boltzmannstr. 3</addrLine>
									<postCode>D-85748</postCode>
									<settlement>Garching</settlement>
								</address>
							</affiliation>
							<affiliation key="aff1">
								<orgName type="institution">University of Southampton</orgName>
								<address>
									<settlement>England</settlement>
								</address>
							</affiliation>
							<affiliation key="aff18">
								<orgName type="laboratory">Computer Laboratory</orgName>
								<orgName type="institution">The University of Cambridge</orgName>
								<address>
									<country key="GB">UK</country>
								</address>
							</affiliation>
						</author>
						<author>
							<persName><forename type="first">Bernd</forename><surname>Fischer</surname></persName>
							<email>b.fischer@ecs.soton.ac.uk</email>
							<affiliation key="aff1">
								<orgName type="institution">University of Southampton</orgName>
								<address>
									<settlement>England</settlement>
								</address>
							</affiliation>
							<affiliation key="aff18">
								<orgName type="laboratory">Computer Laboratory</orgName>
								<orgName type="institution">The University of Cambridge</orgName>
								<address>
									<country key="GB">UK</country>
								</address>
							</affiliation>
						</author>
						<author>
							<persName><forename type="first">Geoff</forename><surname>Sutcliffe</surname></persName>
							<affiliation key="aff2">
								<orgName type="institution">University of Miami</orgName>
							</affiliation>
						</author>
						<author>
							<persName><forename type="first">Wolfgang</forename><surname>Ahrendt</surname></persName>
						</author>
						<author>
							<persName><forename type="first">Stephan</forename><surname>Schulz</surname></persName>
							<email>schulz@eprover.org</email>
						</author>
						<author>
							<persName><forename type="first">Frank</forename><surname>Theiß</surname></persName>
							<affiliation key="aff0">
								<orgName type="institution">Universität des Saarlandes</orgName>
								<address>
									<country key="DE">Germany</country>
								</address>
							</affiliation>
							<affiliation key="aff17">
								<orgName type="department">FR Informatik</orgName>
								<orgName type="institution">Universität des Saarlandes</orgName>
								<address>
									<settlement>Saarbrücken</settlement>
									<country key="DE">Germany</country>
								</address>
							</affiliation>
							<affiliation key="aff20">
								<orgName type="department">Institut für Informatik</orgName>
								<orgName type="institution">Technische Universität München</orgName>
								<address>
									<addrLine>Boltzmannstr. 3</addrLine>
									<postCode>D-85748</postCode>
									<settlement>Garching</settlement>
								</address>
							</affiliation>
						</author>
						<author>
							<persName><forename type="first">Matt</forename><forename type="middle">J</forename><surname>Kaufmann</surname></persName>
							<email>kaufmann@cs.utexas.edu</email>
						</author>
						<author>
							<persName><forename type="first">Strother</forename><surname>Moore</surname></persName>
							<email>moore@cs.utexas.edu</email>
						</author>
						<author>
							<persName><forename type="first">Sandip</forename><surname>Ray</surname></persName>
							<email>sandip@cs.utexas.edu</email>
						</author>
						<author>
							<persName><forename type="first">Erik</forename><surname>Reeber</surname></persName>
							<email>reeber@cs.utexas.edu</email>
						</author>
						<author>
							<persName><forename type="first">Guido</forename><surname>Fiorino</surname></persName>
							<email>guido.fiorino@unimib.it</email>
						</author>
						<author>
							<persName><forename type="first">Alessandro</forename><surname>Avellone</surname></persName>
							<email>alessandro.avellone@unimib.it</email>
						</author>
						<author>
							<persName><forename type="first">Ugo</forename><surname>Moscato</surname></persName>
							<email>ugo.moscato@unimib.it</email>
						</author>
						<author>
							<persName><forename type="first">Steffen</forename><surname>Hölldobler</surname></persName>
						</author>
						<author>
							<persName><forename type="first">Olga</forename><surname>Skvortsova</surname></persName>
							<email>skvortsova@iccl.tu-dresden.de</email>
						</author>
						<author>
							<persName><forename type="first">Martin</forename><surname>Anbulagan</surname></persName>
						</author>
						<author>
							<persName><forename type="first">Konstantin</forename><surname>Giese</surname></persName>
						</author>
						<author>
							<persName><forename type="first">Stephan</forename><surname>Korovin</surname></persName>
						</author>
						<author>
							<persName><surname>Schulz</surname></persName>
						</author>
						<author>
							<persName><forename type="first">Tjark</forename><surname>Weber</surname></persName>
							<email>webertj@in.tum.de</email>
							<affiliation key="aff0">
								<orgName type="institution">Universität des Saarlandes</orgName>
								<address>
									<country key="DE">Germany</country>
								</address>
							</affiliation>
							<affiliation key="aff17">
								<orgName type="department">FR Informatik</orgName>
								<orgName type="institution">Universität des Saarlandes</orgName>
								<address>
									<settlement>Saarbrücken</settlement>
									<country key="DE">Germany</country>
								</address>
							</affiliation>
							<affiliation key="aff20">
								<orgName type="department">Institut für Informatik</orgName>
								<orgName type="institution">Technische Universität München</orgName>
								<address>
									<addrLine>Boltzmannstr. 3</addrLine>
									<postCode>D-85748</postCode>
									<settlement>Garching</settlement>
								</address>
							</affiliation>
						</author>
						<author>
							<affiliation key="aff3">
								<orgName type="institution">Chalmers University of Technology</orgName>
								<address>
									<country key="SE">Sweden</country>
								</address>
							</affiliation>
						</author>
						<author>
							<affiliation key="aff4">
								<orgName type="department">Anbulagan (National ICT Australia</orgName>
								<address>
									<country key="AU">Australia</country>
								</address>
							</affiliation>
						</author>
						<author>
							<affiliation key="aff5">
								<orgName type="department">Serge Autexier (</orgName>
								<orgName type="institution">Universität des Saarlandes</orgName>
								<address>
									<country key="DE">Germany</country>
								</address>
							</affiliation>
						</author>
						<author>
							<affiliation key="aff6">
								<orgName type="department">Chad Brown (Universität</orgName>
								<orgName type="institution">des Saarlandes</orgName>
								<address>
									<country key="DE">Germany</country>
								</address>
							</affiliation>
						</author>
						<author>
							<affiliation key="aff7">
								<orgName type="department">Hans de Nivelle (Max-Planck Institut für Informatik</orgName>
								<orgName type="institution">Alexander Fuchs (University of Iowa</orgName>
								<address>
									<country>Germany), USA</country>
								</address>
							</affiliation>
						</author>
						<author>
							<affiliation key="aff8">
								<orgName type="department">Thomas Hillenbrand (Max-Planck Institut für Informatik</orgName>
								<orgName type="institution">Boris Konev (University of Liverpool</orgName>
								<address>
									<country>Germany), England</country>
								</address>
							</affiliation>
						</author>
						<author>
							<affiliation key="aff9">
								<orgName type="institution">Konstantin Korovin (University of Manchester</orgName>
								<address>
									<country key="GB">England</country>
								</address>
							</affiliation>
						</author>
						<author>
							<affiliation key="aff10">
								<orgName type="institution">Albert Oliveras (Technical University of Catalonia</orgName>
								<address>
									<country key="ES">Spain</country>
								</address>
							</affiliation>
						</author>
						<author>
							<affiliation key="aff11">
								<orgName type="department">Brigitte Pientka</orgName>
								<orgName type="institution">McGill University</orgName>
								<address>
									<country key="CA">Canada</country>
								</address>
							</affiliation>
						</author>
						<author>
							<affiliation key="aff12">
								<orgName type="department">Stephan Schulz (</orgName>
								<orgName type="institution">Technische Universität München</orgName>
								<address>
									<country key="DE">Germany</country>
								</address>
							</affiliation>
						</author>
						<author>
							<affiliation key="aff13">
								<orgName type="institution">Volker Sorge (University of Birmingham</orgName>
								<address>
									<country key="GB">England</country>
								</address>
							</affiliation>
						</author>
						<author>
							<affiliation key="aff14">
								<orgName type="institution">Alwen Tiu (Australian National University</orgName>
								<address>
									<country key="AU">Australia</country>
								</address>
							</affiliation>
						</author>
						<author>
							<affiliation key="aff15">
								<orgName type="institution">Ullrich Hustadt (University of Liverpool</orgName>
								<address>
									<country key="GB">England</country>
								</address>
							</affiliation>
						</author>
						<author>
							<affiliation key="aff16">
								<orgName type="institution">Technische Universität München</orgName>
							</affiliation>
						</author>
						<author>
							<affiliation key="aff19">
								<orgName type="department">Department of Computer Sciences</orgName>
								<orgName type="institution">University of Texas at Austin Austin</orgName>
								<address>
									<postCode>78712</postCode>
									<region>TX</region>
									<country key="US">USA</country>
								</address>
							</affiliation>
						</author>
						<author>
							<affiliation key="aff21">
								<address>
									<settlement>München</settlement>
									<country key="DE">Germany</country>
								</address>
							</affiliation>
						</author>
						<author>
							<affiliation key="aff22">
								<orgName type="department">Dipartimento di Metodi Quantitativi per l&apos;Economia</orgName>
								<orgName type="institution">Università Milano-Bicocca</orgName>
								<address>
									<addrLine>Piazza dell&apos;Ateneo Nuovo, 1</addrLine>
									<postCode>20126</postCode>
									<settlement>Milano</settlement>
									<country key="IT">Italy</country>
								</address>
							</affiliation>
						</author>
						<author>
							<affiliation key="aff23">
								<orgName type="department">International Center for Computational Logic</orgName>
								<orgName type="institution">Technische Universität Dresden</orgName>
								<address>
									<settlement>Dresden</settlement>
									<country key="DE">Germany</country>
								</address>
							</affiliation>
						</author>
						<title level="a" type="main">The 6th International Workshop on the Implementation of Logics</title>
					</analytic>
					<monogr>
						<imprint>
							<date type="published" when="2006-11-03">November 3, 2006</date>
						</imprint>
					</monogr>
					<idno type="MD5">5D288325B401D48BC52DB32EAB9C5B93</idno>
				</biblStruct>
			</sourceDesc>
		</fileDesc>
		<encodingDesc>
			<appInfo>
				<application version="0.7.2" ident="GROBID" when="2023-03-24T16:24+0000">
					<desc>GROBID - A machine learning software for extracting information from scholarly documents</desc>
					<ref target="https://github.com/kermitt2/grobid"/>
				</application>
			</appInfo>
		</encodingDesc>
		<profileDesc>
			<textClass>
				<keywords>
					<term>Presentation Schedule Session 1</term>
					<term>9</term>
					<term>00-10</term>
					<term>00 Invited Talk</term>
					<term>Algorithms and Data Structures for First-Order Equational Deduction 30 LIFT-UP</term>
					<term>Lifted First-Order Planning Under Uncertainty</term>
				</keywords>
			</textClass>
			<abstract>
<div xmlns="http://www.tei-c.org/ns/1.0"><p>The IWIL workshop series brings together developers and users of systems that implement reasoning in logic, to share information about successful implementation techniques for automated reasoning systems and similar programs. Systems of all types (automated, interactive, etc), and for all logics (classical, non-classical, all orders, etc) are of interest to the workshop. Contributions that help the community to understand how to build useful and powerful reasoning systems in practice are of particular interest. Two invited papers, and six research papers selected from the submissions (by the program committee listed below), will be presented. A panel discussion will enable all attendees to participate in open discussion. Previous IWIL workshops include the 5th IWIL (Montevideo, Uruguay), 4th IWIL (Almati, Kazakhstan), 3rd IWIL (Tbilisi, Georgia), 2nd IWIL (Havana, Cuba), and 1st IWIL (Reunion Island).</p></div>
			</abstract>
		</profileDesc>
	</teiHeader>
	<text xml:lang="en">
		<body>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="1">Introduction</head><p>First-order logic with equality is one of the most widely used logics. While there is a large number of different approaches to theorem proving in this logic, the field has been dominated by saturation-based systems using some variant of the superposition calculus [BG90, BG94, BG98, NR01], i.e. systems that employ paramodulation, restricted by ordering constraints and possibly literal selection, as the main inference mechanism, and rewriting and subsumption as the main redundancy elimination techniques. Many systems complement equational reasoning with explicit resolution for non-equational literals. Examples of provers based on the combination of paramodulation, rewriting and (possibly) resolution include SPASS [WBH + 02], Vampire <ref type="bibr" target="#b9">[RV01]</ref>, Otter <ref type="bibr" target="#b7">[MW97]</ref>, its successor Prover9, and E <ref type="bibr">[Sch02,</ref><ref type="bibr">Sch04b]</ref>.</p><p>The power of a saturating prover depends on four different, but interrelated aspects:</p><p>• The calculus (What inferences are necessary and possible?)</p><p>• The inference engine (How are they implemented?)</p><p>• The search organization (How is the proof state organized and which invariants are maintained?)</p><p>• The heuristic control of the search (Which subset of inferences is performed and in what order?)</p><p>In this talk I will discuss the basic concepts of the given clause saturation algorithm and its implementation. In particular, I will describe the behaviour of the DISCOUNT loop version of this algorithm on practical examples, and discuss how this affected the choice of algorithms and data structures for E. I will try to point out some low-hanging fruit, where a lot of performance can be gained for relatively modest investment in code complexity, as well as some more advanced techniques that have a significant pay-off.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="2">Rewrite-Based Theorem Proving</head><p>Superposition is a refutational calculus. It attempts to make the potential unsatisfiability of a formula (consisting of axioms and negated conjecture) explicit using saturation. The search state is represented by a set of first-order clauses (disjunctions of literals over terms). The proof search employs two different mechanisms. First, new clauses are added to the proof state by generating inference rules, using existing clauses as premises. Secondly, simplifying or contracting inference rules are used to remove clauses or to replace them by simpler ones. The proof is successful if this process eventually produces the empty clause, i.e. an explicit contradiction.</p><p>While the generating inferences are crucial to establish the theoretical completeness of the calculus, extensive use of contracting inferences has turned out to be indispensable for actually finding proofs.</p><p>The practical difficulty of implementing a high-performance theorem prover results mostly from the fact that the proof state grows extremely fast with the depth of the proof search. A successful implementation has to be able to handle large data sets, efficiently find potential inference partners, and in particular, be able to quickly identify clauses that can be simplified or removed.</p><p>While the number of inference and simplification rules can be much larger, in nearly all cases only three rules are critical from a performance point of view:</p><p>• Superposition (including resolution as a special case) or a similar restricted form of paramodulation is by far the most prolific generating inference rule. Typically, between 95% and 99% of clauses in a non-trivial proof search are generated by a paramodulation inference. Paramodulation can essentially be described as a combination of instantiation (guided by unification) and lazy conditional rewriting. 
For superposition, this inference is further restricted by ordering constraints (a smaller term cannot be replaced by a larger term) and literal selection (only certain literals need to be considered as applicable or as targets of the application).</p><p>• Unconditional rewriting allows the simplification of a clause by replacing a term with an equivalent, but simpler term. In contrast to superposition, no instantiation of the rewritten clause takes place, and a term is always replaced by a smaller one. As a consequence, this is a simplifying inference, and the original of the rewritten clause can be discarded. Practical experience has shown that in most cases rewriting drastically improves the search behaviour of a prover.</p><p>• Subsumption allows the elimination of clauses if a more general clause already is known. As such, it helps to reduce the search space explosion typical for saturating provers. However, finding subsumption relations between clauses can be very expensive.</p><p>For completeness, it is necessary to eventually consider all combinations of nonredundant clauses as premises for generating inferences. To avoid the overhead of keeping track of each possible combination, the given-clause algorithm splits the proof state into two distinct subsets, the set P of processed (or active) clauses, and the set U of unprocessed (or passive) clauses, and maintains the invariant that all necessary inferences between clauses in P have been performed. On the most abstract level, the algorithm picks a clause from U , performs all inferences with this clause and clauses from P (adding the resulting newly deduced clauses to U ), and puts it into P . This process is repeated until either the empty clause is deduced, the set U runs empty (in which case there is no proof), or some time or resource limit has been reached (in which case the prover terminates without a useful result). 
The major heuristic decision in this algorithm is in which order clauses from U are picked for processing.</p><p>The two main variants of the given clause algorithm differ in how they integrate simplification into this basic loop. The Otter loop maintains the whole proof state in a fully simplified (or interreduced) state. It uses all unit equations for rewriting and all clauses for subsumption attempts. The DISCOUNT loop, on which E is built, only maintains this invariant for the set P of processed clauses. It simplifies newly generated clauses once (to weed out obviously redundant clauses and to aid heuristic evaluation), but it does not use them for rewriting or subsumption until they are actually selected for processing. It thus trades reduced simplification for a higher rate of iterations of the main loop. Opinions differ on which of the two designs is more efficient (see e.g. <ref type="bibr" target="#b10">[RV03]</ref>).</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="3">Representing the Proof State</head><p>Analysis of the behavior of provers over a large set of examples has shown that the size of U typically grows about quadratically with the size of P . Therefore for non-trivial proof problems, the vast majority of clauses is in U , and unprocessed clauses are responsible for most of the resources used by the prover. The overall proof state can easily reach millions of clauses, with a corresponding number of literals and terms as their constituents.</p><p>Surprisingly, with naive implementations much of the CPU time can be taken up with seemingly trivial operations. In the first version of DISCOUNT <ref type="bibr" target="#b0">[ADF95,</ref><ref type="bibr" target="#b4">DKS97]</ref> we found to our surprise that assumedly complex operations like first-order unification were negligible, while linear time operations like the naive insertion of clauses into U (organized as a linear list sorted by heuristic evaluation) took up around half of the total search time.</p><p>The first and most basic data type for a first-order prover is the term. Simple, direct implementations realize terms as ordered trees, with each node labeled by a function or variable symbol, and with the subterms of a term as successor nodes in the tree. Alternatives to this basic design are either increasingly optimized for size, as flat-terms, string terms, and eventually implicit terms (reconstructed on demand) as in the Waldmeister prover <ref type="bibr" target="#b5">[GHLS03]</ref>, or they try to store more and more precomputed information at the term nodes to speed up repetitive operations. This is particularly successful if sets of terms are not represented as sets of trees, but as a shared directed acyclic graph, with repeated occurrences of any given term or subterm only represented once. This approach has been followed in E. 
We found sharing factors varying from 5-15 in the unit equational case, up to several 1000 in the general non-Horn case.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="4">Rewriting</head><p>Shared terms do not only allow a reduction in memory consumption, they also allow the sharing of (some) term-related inferences. In particular, they allow the sharing of rewrite operations and, even more importantly, normal form information among terms. For any rewritten term, we can add a link pointing to the result of the rewrite operation. If we encounter the same term again in the future, we can just follow this link instead of performing a real search for matching and applicable rewrite rules. Less glorious, but not less effective, is the sharing of information about non-rewritability. In either version of the given clause algorithm, the set of potential rewrite rules changes over time, and the strength of the rewrite relation grows monotonically. If a term is in normal form with respect to all rules at a given time, no older rule has to be ever considered again for rewriting this term. This criterion can be integrated into indexing techniques to further speed up normal form computation.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="5">Subsumption</head><p>A single rewriting step is usually cheap to perform. The high cost of rewriting comes from the large number of term positions and rules to consider. For subsumption, on the other hand, even a single clause-clause check can be very expensive, as the problem is known to be NP-complete <ref type="bibr">[KN86]</ref>. Unless reasonable care is taken, the exponential worst case can indeed be encountered, and with clauses that are large enough that this hurts performance significantly<ref type="foot" target="#foot_0">1</ref> .</p><p>To overcome this problem, a number of strategies can be implemented. First, presorting of literals with a suitable ordering stable under substitutions can greatly reduce the number of permutations that need to be considered. Secondly, a number of required conditions for subsumption can be tested rather cheaply. If any of these tests fail, the full subsumption test becomes superfluous.</p><p>Feature vector indexing <ref type="bibr" target="#b12">[Sch04a]</ref> arranges several of these tests in a way that they can be performed not only for single clauses, but for sets of clauses at a time.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="6">Conclusion</head><p>Engineering a modern first-order theorem prover is part science, part craft, and part art. Unfortunately, there is no single exposition of the necessary knowledge available at the moment -something that the community should aim to rectify.</p><p>[Sch04b] S. Schulz. System Description: E 0.81. In D. <ref type="bibr">Basin</ref>  </p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="1">Introduction</head><p>Term indexing has become standard in first order theorem proving and is applied in all major systems in this domain [RV02, Sch02, WBH + 02]. An overview on first order term indexing is given in <ref type="bibr" target="#b29">[RSV01]</ref> and <ref type="bibr" target="#b26">[NHRV01]</ref> presents an evaluation of different techniques. Comparably few term indexing techniques have been developed and studied for higher order logic. An example is Pientka's work on higher order substitution tree indexing <ref type="bibr" target="#b27">[Pie03]</ref>.</p><p>In this paper we present a new approach to higher order term indexing developed for the higher order resolution prover LEO-II<ref type="foot" target="#foot_1">1</ref> , the successor of LEO <ref type="bibr" target="#b15">[BK98]</ref>. Our approach is motivated by work presented in <ref type="bibr" target="#b34">[TSP06]</ref>, which studies the application of indexing techniques for interfacing between theorem proving and computer algebra.</p><p>Pientka's approach is based on substitution tree indexing and relies on unification of linear higher order patterns. While higher order pattern unification is a comparatively high level operation, the approach we present here is based on coordinate and path indexing <ref type="bibr" target="#b33">[Sti89]</ref> and thus relies on lower level operations, for example, operations on hashtables. Apart from indexing and retrieval of terms, we particularly want to speed up basic operations such as replacement of (sub-)terms and occurs checks.</p><p>2 Terms in de Bruijn Notation LEO (and its successor LEO-II under development) is based on Church's simple type theory, that is, a logic built on top of the simply typed λ-calculus <ref type="bibr" target="#b16">[Chu40]</ref>. 
In contrast to LEO, our new term data structure for LEO-II uses de Bruijn <ref type="bibr">[dB72]</ref> indices for the internal representation of bound variables. In this paper, de Bruijn indices have the form x i , where x is a nameless dummy and i the actual index. Constants and free variables in LEO-II, called symbols in the remainder of this paper, have named representations. Due to Currying, applications have only one argument term in LEO-II. 2  Terms in LEO-II are thus defined as follows:</p><p>• Symbols are either constant symbols (taken from an alphabet Σ) or (free, existential) variable symbols (taken from an alphabet V). Every symbol is a term.</p><p>• Bound variables, represented by de Bruijn indices x i for some index i ∈ {1, 2, . . .}, are terms.</p><p>• If s and t are terms, then the application s@t is a term.</p><p>• If t is a term, then the abstraction λ.t is a term.</p><p>For bound variables x i , the de Bruijn index i denotes the distance between the variable and its binder in terms of scopes. Scopes are limited by occurrences of λ-binders, thus the index i is determined by the number of occurrences of λ-binders between the variable and its binder.</p><p>For instance, the term in de Bruijn notation. While de Bruijn indices ease the treatment of α-conversion in the implementation, they are less intuitive. As it can be seen in the above example, different occurrences of the same bound variable may have different de Bruijn indices. This is the case here for b, which translates to both x 0 and x 1 . Vice versa, different occurrences of the same de Bruijn index may refer to different λ-binders. This is the case for x 0 , which relates to both the bound variable b (first occurrence of x 0 ) and the bound variable c (second occurrence of x 0 ). 
Similarly, x 1 is related to the bound variable b and the bound variable a.</p><p>2 Alternative representations, for example, spine notation <ref type="bibr" target="#b17">[CP97]</ref>, offer at first sight shorter paths to term parts that are relevant for a number of operations. The difference is primarily the order in which the parts of a term can be accessed. In the case of the spine notation, for example, the head symbol of a term can be directly accessed. In our approach we try to offer these shortcuts by representing indexed terms in a graph structure. This allows us to adopt additional ways of accessing (sub-)terms by introducing additional graph edges. For instance, the head symbol of each term and its position are indexed in our data structure.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="3.1">Shared Representation of Terms</head><p>Terms in LEO-II have a perfectly shared representation, that is, all occurrences of syntactically equal terms (in de Bruijn notation) are represented by a single instance. An exception is bound variables, where instances of the same variable may have different de Bruijn indices. The treatment of bound variables is described further in Section 3.4.</p><p>Terms are represented as term nodes. Term nodes are numbered by n ∈ {1, 2, . . .} in the order they are created. In the following, term nodes are referred to either by their number or by their graph representation, which is defined as follows:</p><p>• For each symbol s ∈ Σ occurring in some term, a term node symbol(s) is created.</p><p>• For each bound variable x i occurring in some term, a term node bound(i) is created.</p><p>• If an application s@t occurs in some term, where s is represented by term node i and t by term node j, a term node application(i, j) is created.</p><p>• If an abstraction λt occurs in some term, where t is represented by term node i, a term node abstraction(i) is created.</p><p>This graph representation of terms is implemented using hashtables:</p><p>• Hashtable abstr with scope : IN → IN is used to lookup abstractions with a given scope i.</p><p>• Hashtable appl with func : IN → IN → IN is used to lookup an application with a given function i and argument j.</p><p>• Hashtable appl with arg : IN → IN → IN is used to lookup an application with a given argument j and function i. This is similar to appl with func, but the hashtable keys are used here in reversed order.</p><p>This hashtable system can be employed to retrieve term nodes in a similar way as in a relational database. It can be used to retrieve single terms as well as sets of terms, for example, all application term nodes whose function term is represented by node i.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="3.2">Partial Syntax Trees</head><p>Term indexing in LEO-II is based on partial syntax trees (PST), a concept that is newly introduced in this paper. Partial syntax trees are used to indicate positions of a symbol or a particular subterm within a term. PSTs are called partial because they only represent relevant parts of a term. Examples are PSTs recording symbol occurrences in a term, where relevant part means, that the term part in question actually contains an occurrence of that symbol. Such PSTs allow for early detection of branches in a term's syntax tree with no occurrences of a specific symbol, since these branches are not represented in the PST for this symbol.</p><p>In LEO-II's term system (remember that this is based on simply typed λ-calculus with Currying) a term position is defined as follows:</p><p>• While symbol nodes and bound variable nodes have no children in a term's syntax tree, abstraction nodes respectively application nodes have exactly one child respectively exactly two children. The relative position of these children to their parent node is described by either abstr (the relation between an abstraction node and its scope), func or arg (the relation between an application node and its function term respectively its argument term).</p><p>• A position is defined as a (possibly empty) sequence of relative positions. Starting from the top position in a term, each entry of the sequence describes one traversal step in the term's syntax tree.</p><p>• An empty sequence of relative positions represents the root position or empty position, which is the topmost position in a term.</p><p>Consider, for example, the term (λ.x 0 )@(f @a). Its subterms occur at the following positions:</p><p>(λ.x 0 )@(f @a) : [] λ. Based on this notion of positions, we introduce the notion of partial syntax trees. 
As an example<ref type="foot" target="#foot_3">3</ref> , consider the term a = 0 • a, which translates in Curried form to (= @a)@((•@0)@a). The example term's syntax tree is given by: @ @ @ = a @ a • 0 A partial syntax tree (PST) is a tree of nodes corresponding to positions in a term. Each term position which occurs in a PST is represented as a node which</p><p>• has up to three child trees<ref type="foot" target="#foot_4">4</ref> (these children are partial syntax trees which correspond to the terms at one of the relative positions abstr, func or arg), and</p><p>• may be annotated by some data.</p><p>A partial syntax tree t is denoted by pst(t abstr , t func , t arg ), where t abstr is the PST of the scope of t if t is an abstraction, and where t func and t arg are the PSTs of the function term and the argument term of t if t is an application. If no position in a branch of the syntax tree is annotated by some data, this branch is empty and is denoted by an underscore ( ).</p><p>The PST corresponding to the whole term in the above example and its annotations is thus given by: p 1 = pst ( , p 2 , p 5 ) p 2 = pst ( , p 3 , p 4 ) p 3 = pst ( , , ) with annotation = p 4 = pst ( , , ) with annotation a p 5 = pst ( , p 6 , p 9 ) p 6 = pst ( , p 7 , p 8 ) p 7 = pst ( , , ) with annotation • p 8 = pst ( , , ) with annotation 0 p 9 = pst ( , , )</p><p>with annotation a</p><p>When the term is added to the index, however, not the PST of the entire term is recorded, but the PSTs of each of the occurring symbols (and subterms). For example, the PST of all occurrences of the symbol a in a = 0 • a is given by: @ @ @ a a</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head>PST for a</head><p>If a symbol occurs at a term position, the corresponding PST entry is annotated by that symbol. If a branch of a term's syntax tree has no occurrences of the symbol in question, the PST contains no entry for this branch. The PST for occurrences of symbol a in the above example is thus given by: p a1 = pst( , p a2 , p a4 ) p a2 = pst( , , p a3 ) p a3 = pst( , , ) with annotation a p a4 = pst( , , p a5 ) p a5 = pst( , , ) with annotation a</p><p>Similarly, the PSTs for the remaining symbols are recorded: @ @ = @ @ @ • @ @ @ 0 PST for = PST for • PST for 0</p><p>If the PST of all occurrences of a symbol (or subterm) t in a given term t is available, this provides a basis for speeding up replacements of t . Also a costly occurs check is avoided, since the existence or non-existence of a PST for a symbol can be used as criterion. The nodes of the PST for t determine the nodes in t that have to be modified when performing the replacement operation, and all nodes in t that are not represented in the PST for t remain unchanged (i.e., the recursion over the term structure for replacement operations is pruned early).</p><p>When replacing a by (f @b) in the above example, the operation proceeds as follows:</p><p>• The operations starts at root position with term (= @a)@((•@0)@a) and with the corresponding PST for a, p a1 = pst ( , p a2 , p a4 ). As both the function child p a2 and the argument child p a3 of the PST are nonempty, the replacement operation recurses over both the function term (= @a) and the argument term ((•@0)@a):</p><p>[(f @b)/a](= @a)@((•@0)@a) ⇒ ([(f @b)/a](= @a))@([(f @b)/a]((•@0)@a))</p><p>• To replace a in (= @a) with corresponding PST p a2 = pst( , , p a3 ), only the argument term has to be processed. The child PST corresponding to the function term in p a2 is empty, indicating that there are no further occurrences of a in this term. 
Thus we have:</p><p>[(f @b)/a](= @a) ⇒ (= @([(f @b)/a]a))</p><p>and analogously for ((•@0)@a)) and p a4 = pst ( , , p a5 ), where again processing the function term (•@0) is avoided:</p><p>[(f @b)/a]((•@0)@a)) ⇒ ((•@0)@([(f @b)/a]a))</p><p>• Finally a is replaced in the term a with corresponding PST p a3 respectively p a5 . Both p a3 and p a5 have no child nodes and are annotated with a, so the result is in both cases the replacement term (f @b):</p><formula xml:id="formula_0">[(f @b)/a]a ⇒ (f @b)</formula><p>The result of this operation is thus:</p><p>[(f @b)/a](= @a)@((•@0)@a) ⇒ (= @(f @b))@((•@0)@(f @b))</p><p>During the operation, only those branches of the syntax tree with an actual occurrence of a are processed and branches with no occurrences of a, here the terms = and (•@0), are avoided. In this example, only five out of nine term nodes have to be processed due to the guidance provided by the PST.</p><p>As a probably useful indicator for the speedup for replacements obtainable this way we therefore investigate the ratio of term size to PST size counted in nodes of the tree, that is, the number of abstractions, applications, symbols and bound variables. As is illustrated above, this ratio is a measure for the speedup which we expect for replacement operations.</p><p>In the above example, the term size is 9 (we have 9 nodes), which gives the following rates for the occurring symbols: We examined an excerpt of Jutting's Automath encoding of Landau's book Grundlagen der Analysis <ref type="bibr" target="#b35">[vBJ77,</ref><ref type="bibr" target="#b22">Lan30]</ref> with over 900 definitions and theorems (see Section 4 for details) and found an average PST size/term size rate of 0.21 for symbol occurrences. When indexing nonprimitive terms, too (that is, applications and abstractions), this rate dropped to 0.12.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="3.3">Building the Index</head><p>The index records whether and at which positions a subterm<ref type="foot" target="#foot_5">5</ref> occurs in a term. Similar to relational databases, both subterms occurring in a given term and terms in which a given subterm occurs are indexed. Thus, the index can be used to find terms in the database with occurrences of particular subterms and also to speed up logical operations such as substitution by avoiding occurs checks.</p><p>The index is built recursively, starting from symbols, which are the leaf nodes in a term's syntax tree. The only term which occurs in a symbol is the symbol itself at root position. Nonprimitive terms, that is abstractions and applications are built up as follows:</p><p>• The subterms occurring in an abstraction are all subterms which occur in the scope of the abstraction. For a symbol whose occurrences in a term A are recorded in the PST t , its occurrences in the abstraction λ.A are given by t = pst(t , , ).</p><p>• The subterms occurring in an application are all subterms which occur in its function term or in its argument term. For a symbol whose occurrences in the function and argument term are recorded in the PSTs t func and t arg , the PST t recording its occurrences in the application is given by t = pst( , t func , t arg ). If the term occurs only in the argument respectively in the function of an application, t func respectively t arg is empty.</p><p>Furthermore each primitive and nonprimitive term is recorded to occur as a subterm of itself at root position.</p><p>The result is a PST for each subterm of the term to be indexed, describing the occurrences of this subterm. These PSTs are added to the hashtable occurrences. Additionally, terms are indexed according to their subterms in a second hashtable occurs in. A third hashtable is occurrs at, which is used to index terms according to subterms at a given term position. 
Thus the core of the index consists of:</p><p>• Hashtable occurrences : IN → IN → PST indexes occurrences of subterms (the second key) in a given term (the first key). The indexed value is a PST of the positions where the subterm occurs. If a subterm does not occur, then there is no entry in the hashtable.</p><p>• Hashtable occurs in : IN → IN * is used to index a list of all terms in which a given subterm (the key) occurs.</p><p>• Hashtable occurrs at : pos → IN → IN * is a hashtable to index all terms in which a given subterm (the second key) occurs at a given position (the first key).</p><p>For example, occurrences of symbol a in the example term (= @a)@((•@0)@a) are indexed by the following hashtable updates (we assume that a is represented by term node i and (= @a)@((•@0)@a) by term node j):</p><p>• add pst a with first key j and second key i in occurrences • add j with key i in occurs in • add j to the set hashed in occurs at with first key [func; arg] and second key i;</p><p>if no such set exists in the hashtable, add the singleton {j}</p><p>• add j to the set hashed in occurs at with first key [arg; arg] and second key i; if no such set exists in the hashtable, add the singleton {j}</p><p>The basic operations of adding a term to the index take constant time (except for rehashing). The indexing of a term of length n takes time O(n).</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="3.4">Bound Variables</head><p>Bound variables play a special role in the term system. To see this, remember our example from the beginning, that is, the term λa.λb.((= b)((λc.(cb))a)) or, with de Bruijn indices, λλ.((= x 0 )((λ.(x 0 x 1 ))x 1 )). This example shows that two occurrences of the same bound variable may have syntactically different de Bruijn indices and that the de Bruijn indices of occurrences of different variables may be syntactically equal. It is desirable to provide quick access to all variables bound by a given binder to speedup β-reduction and related operations such as raising or lowering of bound variable indices. We will now illustrate our solution to this issue. Remember that indexed terms are always kept in βη normal form, hence, normalisation is mandatory after instantiation of existential variables or expansion of defined constants (if the modified terms shall be indexed again).</p><p>The syntax tree of our example term in de Bruijn notation is</p><formula xml:id="formula_1">λ λ @ @ @ = x 0 λ x 1 @ x 0 x 1</formula><p>In this case the bound variable b has two instances which are denoted by x 0 and x 1 , while x 0 (resp. x 1 ) can denote both c or b (resp. b or a). Since bound variables are indexed as described in Section 3.3, this gives a somewhat scattered information on where to find the variables that are bound by one particular λ-binder. This kind of information, however, is important in practice, for example, to support efficient βreduction. 
We therefore once more employ PSTs to describe the occurrences of variables bound by one and the same λ-binder:</p><formula xml:id="formula_2">λ 1 λ 2 @ @ x 1 λ 1 λ 2 @ @ @ x 0 λ 3 @ x 1 λ 1 λ 2 @ @ λ 3 @ x 0</formula><p>Variables bound by λ 1 Variables bound by λ 2 Variables bound by λ 3</p><p>For example, the PST indicating occurrences of variables bound by λ 2 in the above example is given by:</p><formula xml:id="formula_3">p 1 = pst(p 2 , , ) p 2 = pst(p 3 , , ) p 3 = pst( , p 4 , p 6 ) p 4 = pst( , , p 5 ) p 5 = pst( , , )</formula><p>with annotation 0 p 6 = pst( , p 7 , ) p 7 = pst(p 8 , , ) p 8 = pst( , , p 9 ) p 9 = pst( , , )</p><p>with annotation 1</p><p>The explicit notation of variables bound by λ 2 and λ 3 is analogous and therefore omitted here.</p><p>This list of PSTs is also recorded in LEO-II's index, in the order shown above. Each PST is assigned a scope number, where the scopes are defined by the occurrence of λbinders. When traversing the syntax tree, the PST recording occurrences of variables bound by the first λ-binder is assigned scope number 1, the PST related to the second λ-binder has scope number 2 and so on.</p><p>While the term λ((λ.x 0 ) = ((λ.(x 0 x 1 ))x 1 )) is closed, that is, all de Bruijn indexed variables are bound by a λ-binder within the term, this is not always true for its subterms. Unbound variables occur, for example, in λ.(x 0 x 1 ), where x 1 refers to a binder outside the term. In particular, all primitive terms consisting only of de Bruijn variables refer to a binder outside this term. While the PSTs for bound variables can be constructed as shown above, the determination of the scope numbers deserves a special treatment in case of loose bound variables, that is bound variables without a binder in the given subterm. If a term has occurrences of loose bound variables, their de Bruijn index allows to determine the distance to their (virtual) binder measured in scopes upwards from the term's root position. 
PSTs for loose bound variables are assigned a scope number s ≤ 0. The PST to denote all occurrences of x 1 in itself is consequently assigned the scope number −1, and the PST denoting the occurrence of x 1 in λ.(x 0 x 1 ) is assigned scope number 0.</p><p>Indexing of bound variable occurrences in a term is used to speed up β-reduction. For each λ-binder the positions of variables bound by this binder are known, thus, only the parts of the term that actually are modified have to be processed. In the context of explicit substitutions <ref type="bibr" target="#b19">[FKP96,</ref><ref type="bibr" target="#b13">ACCL90]</ref>, the implementation of shift and lift operators can furthermore be reduced to recalculation of the offset and elimination of bound variable PSTs from the list.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="3.5">Using the Index</head></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="3.5.1">Adding and Retrieving Terms:</head><p>Terms are added to and retrieved from an index in a similar way as in coordinate or path indexing <ref type="bibr" target="#b33">[Sti89]</ref>. When a term t is added to the index, the PSTs of symbol occurrences are constructed as described in Section 3.3. Then the following hashtables are updated:</p><p>• In occurrences, the PSTs of the occurring symbols (or subterms) are added.</p><p>• For each occurring symbol (or subterm), t is added to the set of terms which is recorded for that symbol in occurs in. If there is no such entry in occurs in, the singleton {t} is added.</p><p>• For each term position in t, t is indexed in occurs at in the same way with the position as first key and the the subterm as second key.</p><p>Furthermore, the PSTs for bound variables are constructed as described in Section 3.4 and are added to the hashtable boundvars.</p><p>For each occurrence of a subterm at a given position in a query term, a set of candidate terms is retrieved from hashtable occurs at. Sets of candidate terms can furthermore be retrieved from hashtable occurrences for subterms occurring at unspecified positions. The result of the query is the intersection of all candidate sets obtained this way.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="3.5.2">Speeding up Computation:</head><p>Using the index, efficient occurs checks are reduced to single hashtable lookups. Efficient replacement of a symbol or subterm t is furthermore supported by PSTs recorded in the index (in hashtable occurrences), since these PSTs make it possible to avoid processing of term parts with no occurrences of t. Fast β-reduction is supported by the PSTs recorded in hashtable boundvars.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="3.5.3">Explicit Substitutions:</head><p>Our approach can also support explicit substitutions. Note that subterm occurrences in a term t can be quickly determined as described above. Similarly, the occurrences of a subterm s in the result of applying a substitution σ to a term t can be determined using our indexing technique. To determine occurrences of s in σt where σ = [b/a], occurrences of s and a in t are looked up from the index, as well as occurrences of s in b. Thus we get three PSTs pst s/t , pst a/t and pst s/b . To find all occurrences of s in σt, all positions annotated by a in pst a/t are replaced by a new sub PST pst s/b , and the result is merged with pst s/t . For σ 1 σ 2 . . . σ n t, this operation is cascaded.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="4">Preliminary Evaluation</head><p>Full evaluation of the indexing method presented here is still work in progress. This is because the implementation of LEO-II is still at its very beginning, so that we cannot pursue an empirical evaluation within theorem proving applications with LEO-II at the current stage of development. A purely theoretical examination is difficult and furthermore questionable, as the computational complexity can be expected to heavily depend on the structure of the application domains <ref type="bibr" target="#b26">[NHRV01]</ref>.</p><p>However, we were able to undertake some first experiments which may give us an impression of the efficiency gain we may expect for LEO-II (for example, in comparison to LEO and other higher order theorem provers that do not use term indexing techniques).</p><p>In order to get a realistic impression of the structural characteristics of real world term sets, we indexed a sample selection of 900 theorems and definitions from a HOTPTP <ref type="bibr" target="#b20">[GS06]</ref> version of Jutting's Automath encoding of Landau's book Grundlagen der Analysis <ref type="bibr" target="#b35">[vBJ77,</ref><ref type="bibr" target="#b22">Lan30]</ref>. An overview on the results of this experiment is given in Figure <ref type="figure" target="#fig_4">1</ref>. We will now discuss these results.</p><p>In our study, we determined, for example, the rate of term sharing, which is the average number of parent nodes per node and the average number of terms a given node occurs in. At first sight the average number of parent nodes of 1.68 appears to be relatively low, an impression which is underlined by the high number of nodes with no or one parent node (about 90%). 
For nodes which are deeply buried in a term's structure, however, the sharing rate multiplies along the path up to root position, so the average number of terms a node occurs in <ref type="bibr">(33.5)</ref>  replacement, substitution and β-reduction, due to the reuse of already indexed subterms. Additionally, the maintenance of the index is supported by data already existing in the index. As most logical operations on terms reuse parts of these terms, the cost to maintain the index is less than indexing a set of terms starting from an empty index, as required for instance, when initially loading a mathematical theory to memory. An indicator for the term retrieval performance is the average number of terms a node occurs in. With an average number of occurrences of 33.5 and a total of 11618 term nodes, a theoretical average of 99.7% of candidate nodes for retrieval can be excluded by checking occurrences of subterms only (compared to a naive approach). By specifying the position of the subterm's occurrence, the set of retrieved terms is further restricted.</p><p>The use of shared terms is responsible for a further improvement of performance similar to the transition from coordinate indexing to path indexing <ref type="bibr" target="#b33">[Sti89]</ref>. While both methods employ a common underlying idea, path indexing is substantially faster. In the former approach terms are discriminated by occurrences of single symbols at specified positions (or coordinates, hence the name). The criterion in the latter is the occurrence of a path, that is the occurrence of a sequence of specified symbols in a descending path in the syntax tree. The retrieval of candidates for one path of length n is thus corresponding to n passes of retrieval in coordinate indexing. We expect a similar effect in our approach due to shared representation of terms, since terms are indexed according to the occurrence of nonprimitive term structures, too. 
This assumption is supported by the increase of the exclusion rate of 95.7% for symbol occurrences only (with an average number of 493.9 superterms per node) to 99.8% for nonprimitive terms (with an average of 24 superterms per node). This rise corresponds to a theoretical speedup by factor 20.</p><p>We can also predict a significant performance improvement of operations, such as replacement, substitution and occurs check. They all are critical in theorem proving. The indexing method we present here supports occurs checks in constant time, based on simple hashtable lookup. This applies not only to symbols, but also to nonprimitive terms. This also supports global replacement of defined terms (for example, a = b) by their definiens (for example, ∀P.P a ⇒ P b).</p><p>A measure of the efficiency improvement for replacement operations is the PST/term size rate. The value is 0.21 for symbols, which is relevant, for example, for variable substitution, and which corresponds to a theoretical speedup by factor 5. The value for bound variables, which is relevant for β-reduction, is 0.33, corresponding to a theoretical speedup by factor 3. The probably least common operation is the replacement of nonprimitive terms, as discussed above.</p><p>We are aware of the fact that the results shown here are based on a theoretical juggling with average values. These results may thus differ strongly from the behaviour when used in a realistic application in theorem proving as we intend. This is due to several factors: First we expect the structure of the set of indexed terms to change during operation. In general the basic operations of a theorem prover will increase the sharing rate of some symbols and subterms. This makes occurrences of these terms a less discriminating criterion, on the other hand it decreases the cost of maintenance of the index. The tradeoff of these two factors will be subject to further examination. 
Second, the evaluation of average values most likely does not correspond to index operation sequences as they actually occur in a realistic theorem proving application.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="5">Conclusion and Future Work</head><p>The main features of the new higher order term indexing method we presented in this paper are shared term representation, relational indexing of subterm occurrences and the use of partial syntax trees. Occurrences of subterms are indexed in several ways and can be flexibly combined to design customised procedures for term retrieval and heuristics. Our method furthermore provides support for potentially costly operations such as global unfolding of defined terms. Our indexing method is based on simple hashtable operations, so there is little computational overhead in term retrieval and maintenance of the index. Indexing of subterm occurrences allows furthermore for an occurs check in constant time. Additionally, the performance is improved by the use of PSTs. Finally, a shared representation of terms helps to keep the costs for maintaining the index low and improves the performance of retrieval operations.</p><p>The indexing technique presented in this paper has been implemented in OCaml [LDG + 05]. A proper evaluation of the approach within a real theorem proving context is still work in progress. However, first experiments are promising.</p><p>The preliminary evaluation in this paper is based on some statistical data we computed for 900 example terms from an encoding of Landau's textbook. To what extent our predictions on efficiency gain are realistic will be examined in future work.</p><p>Furthermore, as the experience from first order term indexing shows, most successful systems employ a combination of various indexing methods which are used complementarily. We will thus also evaluate which aspects of our indexing method result in a real performance gain and which do not. Our evaluation will be done with the LEO-II prover as soon as a first version of its resolution loop is available. 
In the LEO-II context we are particularly interested in the fast determination of clauses (resp. literals and terms in clauses) with respect to certain filter criteria. In the extreme case, these criteria may be based on complex operations such as higher order pattern unification <ref type="bibr" target="#b28">[PP03]</ref> or even full higher order unification <ref type="bibr" target="#b21">[Hue75,</ref><ref type="bibr" target="#b32">SG89]</ref>.</p><p>Our approach differs from Pientka's work, which has a stronger emphasis on term retrieval. Pientka's method is based on high level operations such as unification of linear higher order patterns to construct substitution trees, while our method relies mainly on simpler low level operations and makes strong use of hashtables. Both methods appear complementary to some extent, which motivates the study of a combination of both.</p><p>Future work also includes the investigation of alternative term representation techniques, such as suspension calculus <ref type="bibr" target="#b25">[Nad02]</ref>, spine representation <ref type="bibr" target="#b17">[CP97]</ref> and explicit substitutions <ref type="bibr" target="#b19">[FKP96,</ref><ref type="bibr" target="#b13">ACCL90]</ref> in the context of our term indexing approach. We are especially interested in the combination of aspects from different representation techniques within a single graph structure.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="1">Introduction</head><p>Recent years have seen rapid advancement in the capacity of automatic reasoning tools, most notably for decidable theories such as propositional calculus and Presburger arithmetic. For instance, modern BDD packages and Boolean satisfiability solvers can automatically solve problems with tens of thousands of variables and have been successfully used to reason about commercial hardware system implementations. This advancement has sparked significant interest in the general-purpose, interactive theorem proving community to improve the efficiency and automation in theorem provers by providing a connection with state-of-the-art automatic reasoning tools. In this paper, we present a general mechanism we are building to connect deduction tools, external to the ACL2 theorem prover, with that prover. ACL2 [KMM00, KM06] is an industrial-strength interactive theorem proving system. It consists of an efficient programming interface based on an applicative subset of Common Lisp <ref type="bibr" target="#b51">[KM94]</ref>, and a first-order, inductive theorem prover for a logic of recursive functions. The ACL2 theorem prover supports several deduction mechanisms such as congruence-based conditional rewriting, well-founded induction, several decision procedures, and generalization. The theorem prover has been particularly successful in the verification of microprocessors and hardware designs such as the floating point multiplication, division, and square root algorithms of AMD processors [MLK98, Rus98, RF00, FKR + 02], microcode for the Motorola CAP DSP <ref type="bibr" target="#b38">[BH97]</ref>, separation properties for the Rockwell Collins AAMP7 TM processor <ref type="bibr" target="#b46">[GRW04]</ref>, and a non-trivial pipelined machine with interrupts, exceptions, and speculative instruction execution <ref type="bibr" target="#b69">[SH97]</ref>. 
However, the applicability of ACL2 (as in fact that of any theorem prover) is often limited by the amount of user expertise required to drive the theorem prover; indeed, the verification projects referenced above represent many man-years of effort. Yet, a significant number of lemmas proven in the process, in particular many proofs exhibiting invariance of predicates over executions of hardware design implementations, can be expressed in a decidable theory and automatically dispatched by an automatic decision procedure for the theory.</p><p>On the other hand, it is not trivial to connect ACL2 with an external deduction tool. The logic of ACL2 is complicated by the presence of several constructs intended to facilitate effective proof structuring <ref type="bibr" target="#b53">[KM01]</ref>. It is therefore imperative (i) to determine under what logical constraints a conjecture certified by a combination of the theorem prover and other tools can be claimed to be a valid theorem, and (ii) to provide mechanisms so that a tool implementor might be able to meet the logical constraints so determined.</p><p>In this paper, we propose a general interface for connecting external tools with ACL2. The user can instruct ACL2 to use external deduction tools for reducing a goal formula C to a list of formulas L C during a proof attempt. The claim is that provability of each formula in L C implies the provability of C. We present a sufficient condition expressible in ACL2 guaranteeing this claim, and discuss the soundness requirements on the tool implementor. We also propose a modest augmentation of the logical guarantees provided by ACL2, in order to facilitate connection with certain types of tools (cf. Section 5).</p><p>We distinguish between two classes of external tools, namely (i) tools verified by the ACL2 theorem prover, and (ii) unverified but trusted tools. 
A verified tool must be formalized in the logic of ACL2 and the sufficient condition alluded to above must be formally established by the theorem prover. An unverified tool can be defined using the ACL2 programming interface, and can invoke arbitrary executable programs using calls to the underlying operating system via a system call interface. An unverified tool is introduced with a "tag" acknowledging that the validity of the formulas proven using the tool depends on the correctness of the tool.</p><p>The interface for unverified tools enables us to invoke Boolean Satisfiability solvers, BDD packages, etc., for simplifying ACL2 subgoals. Why might verified tools be of interest? The formal language of ACL2 is a programming language, based on an applicative subset of Common Lisp. The close connection with Lisp makes it possible to write efficiently executable programs in the ACL2 logic <ref type="bibr" target="#b51">[KM94]</ref>. In fact, most of the ACL2 source code is implemented in this language. We believe it will be handy to provide facilities to the ACL2 user to control proofs by (i) implementing customized domain-specific reasoning code, (ii) verifying with ACL2 that the code is sound, and (iii) invoking the code for proving theorems in the target domain. In fact, ACL2 currently provides a way for users to augment its built-in term simplifier with their own customized reasoning code, via the so-called "meta rules" <ref type="bibr" target="#b39">[BM81]</ref>. However, such rules essentially augment the reasoning engine of ACL2 without providing the user control to manipulate a specific subgoal arising during a proof. Furthermore, meta rules only allow reducing a term to one that is provably equivalent-they do not allow generalization.</p><p>With our interface, an ACL2 user can invoke directly a customized, verified reasoning tool to replace a subgoal by a collection of possibly more general subgoals.</p><p>The remainder of the paper is organized as follows. 
In Section 2 we provide a brief overview of the ACL2 system. In Sections 3 through 5 we present our interface for connecting verified and unverified external tools with ACL2, touching upon the logical underpinnings involved. We discuss related work in Section 6 and conclude in Section 7. No previous familiarity with ACL2 is assumed in this presentation; the relevant features of the logic and the theorem prover are discussed in Section 2.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="2">ACL2</head><p>The name "ACL2" stands for "A Computational Logic for Applicative Common Lisp". The name is used to denote (i) a programming language based on an applicative subset of Common Lisp, (ii) a first-order logic of recursive functions with induction, and (iii) a theorem prover for the logic. In this section, we provide a brief overview of ACL2. The review is not complete, but only intended to lay the foundation for our work. Readers interested in learning ACL2 are referred to the ACL2 Home Page <ref type="bibr" target="#b54">[KM06]</ref> which contains extensive hypertext documentation together with references to several books and papers.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="2.1">The logic</head><p>The kernel of the ACL2 logic consists of a formal syntax, some rules of inference, and some axioms. Kaufmann and Moore <ref type="bibr" target="#b52">[KM97]</ref> provide a precise description of the kernel logic. The logic supported by the theorem prover is an extension of the kernel logic.</p><p>The kernel syntax describes terms composed of variables, constants, and function symbols applied to a fixed number of argument terms. The kernel logic introduces "formulas" as composed of equalities between terms and the usual propositional connectives.</p><p>The syntax of ACL2 is the prefix-normal syntax of Lisp; thus, the application of a binary function f on arguments a and b is represented by (f a b) rather than the more traditional f (a, b). However, in this paper we will use the formal syntax only when it is relevant for the associated discussion. In particular we will write (x × y) instead of (* x y) and (if x then y else z) instead of (if x y z).</p><p>The axioms of ACL2 describe the properties of certain Common Lisp primitives. For example, the following are axioms about the primitives equal and if:</p><formula xml:id="formula_4">Axioms. x = y ⇒ equal(x, y) = T x = y ⇒ equal(x, y) = NIL x = NIL ⇒ (if x then y else z) = z x = NIL ⇒ (if x then y else z) = y</formula><p>Notice that the kernel syntax is quantifier-free and each formula is implicitly universally quantified over all the free variables in the formula. Furthermore, the use of function symbols equal and if make it possible to embed propositional calculus and equality into the term language. When we write a term τ in place of a formula, it stands for the formula τ = NIL. Thus, in ACL2, the following term is an axiom relating the Lisp functions cons, car, and equal.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head>Axiom. equal(cons(car(x, y)), x)</head><p>This axiom stands for the formula equal(car(cons(x, y)), x) = NIL, which is provably equivalent to car(cons(x, y)) = x. With this convention, we will feel free to interchange terms and formulas. We will similarly feel free to apply logical connectives to a term or formula. Thus when we write ¬τ , where τ is a term, we mean the term (or formula by the above convention) obtained by applying the function symbol not to τ , where not is axiomatized as:</p><formula xml:id="formula_5">Axiom. not(x) = if x then NIL else T</formula><p>The convention above enables us to interpret an ACL2 theorem as follows. If the term τ (when interpreted as a formula) is a theorem then for all substitutions σ of free variables in τ to objects in the ACL2 universe the (ground) term τ /σ evaluates to a non-NIL value. This alternative view will be critical in deriving sufficient conditions for correctness of external tools integrated with ACL2.</p><p>The kernel logic includes axioms that characterize the primitive Lisp functions over numbers, characters, strings, symbols, and ordered pairs. These objects together make up the ACL2 standard universe; but "non-standard" ACL2 universes may contain other objects. Lists are represented as ordered pairs, so that the list (1 2 3) is represented as cons(1, cons(2, cons(3, NIL))). For brevity, we will write list(x, y, z) as an abbreviation for cons(x, cons(y, cons(z, NIL))). Another convenient data structure built out of ordered pairs is the association list (or alist) which is essentially a list of pairs, e.g., list(cons("a", 1), cons("b", 2)). 
We often use alists for describing finite mappings; the above alist can be thought of as a mapping that associates the strings "a" and "b" with 1 and 2, respectively.</p><p>In addition to propositional calculus and equality the rules of inference of ACL2 include instantiation, together with first-order induction over ε₀ (see below). For instance, the formula car(cons(2, x)) = 2 is provable by instantiation from the above axiom relating car, cons, and equal.</p><p>The ACL2 theorem prover initializes with a boot-strapping (first-order) theory called the Ground Zero theory (GZ for short). In the sequel, whenever we mention an ACL2 theory, we mean a theory obtained by extending GZ via the extension principles explained below. The theory GZ contains the axioms of the kernel logic. In addition, it also contains a well-founded first-order induction principle, by way of an embedding of ordinals below ε₀. In particular, GZ is assumed to be inductively complete, that is, it is assumed implicitly to contain all the first-order well-founded induction axioms expressible using formulas φ in the language of GZ:</p><formula xml:id="formula_6">(∀y &lt; ε₀)[((∀x &lt; y)φ/{y := x}) ⇒ φ(y)] ⇒ (∀y &lt; ε₀)φ(y)</formula></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="2.1.1">Extension Principles</head><p>ACL2 also provides several extension principles that allow the user to extend a theory by introducing new function symbols and axioms about them. Two extension principles that are particularly relevant to us are (i) the definitional principle to introduce total functions, and (ii) the encapsulation principle to introduce constrained functions,<ref type="foot" target="#foot_6">1</ref> and we discuss them in some detail. Note that whenever we say (below) that a theory is extended by axiomatizing new function symbols we implicitly assume that the resulting theory is also inductively complete, that is, all the induction axioms in the language of the extended theory are also introduced together with the axioms explicitly specified.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head>Definitional Principle:</head><p>The definitional principle allows the user to extend a theory by axiomatizing new total (recursive) functions. For example, one can use this principle to introduce the unary function symbol fact axiomatized as follows, which returns the factorial of its argument.</p><formula xml:id="formula_7">Definitional Axiom. fact(n) = if natp(n) ∧ (n &gt; 0) then n × fact(n − 1) else 1</formula><p>Here, natp(n) is axiomatized in GZ to return T if n is a natural number, and NIL otherwise. To ensure that the extended theory is consistent, ACL2 first proves that the recursion terminates. This is achieved by exhibiting some measure m that maps the set of function arguments to some well-founded structure derived from the embedding of ordinals below 0 . For the axiom above, an appropriate measure is nfix(n) which is axiomatized in GZ to return n if n is a natural number, otherwise 0.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head>Encapsulation Principle:</head><p>The encapsulation principle allows the extension of the ACL2 logic with functions introduced with constraints rather than full definitions. This principle, for instance, allows us to extend a theory by introducing a new unary function foo with only the following axiom that merely posits that foo always returns a natural number: Encapsulation Axiom. natp(foo(x))</p><p>The encapsulation axioms are also referred to as constraints, and the functions introduced via this principle are called constrained functions. To ensure the consistency of the resulting theory, one must show that there exist (total) functions satisfying the alleged constraints; such functions are called witnesses to the constraints. For foo above, an appropriate witness is the constant function that always returns 1.</p><p>For a constrained function f the only axioms known are the constraints. Therefore, any theorem proved about f is also valid for a function f that also satisfies the constraints. More precisely, call the conjunction of the constraints on f the formula φ. For any formula θ, let θ be the formula obtained by replacing the function symbol f by the function symbol f . Then a derived rule of inference, functional instantiation, specifies that if φ and ψ are theorems then ψ is also a theorem. Consider, for example, the constant function of one argument that returns 10. This function satisfies the constraint for foo; thus if bar(foo(x)) is provable for some function bar then functional instantiation can be used to prove bar(10). .</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="2.2">The Theorem Prover</head><p>As a theorem prover, ACL2 is an automated, interactive proof assistant. It is automated in the sense that no user input is expected once the theorem prover has embarked on the search for the proof of a conjecture. It is interactive in the sense that the proof search is largely determined by the previously proven lemmas in its database at the beginning of a proof attempt; the user essentially programs the theorem prover by stating lemmas for it to prove, to use automatically in subsequent proofs. There is also a goal-directed interactive loop (called the "proof-checker"), similar in nature to what is offered by LCF-style provers; but it is much less frequently used and not relevant to the discussion below.</p><p>Interacting with the ACL2 theorem prover principally proceeds as follows. The user creates a relevant theory (extending GZ) using the extension principles to model some artifact of interest. Then she poses some conjecture about the functions in the theory and instructs the theorem prover to prove the conjecture, possibly providing hints on how to proceed in the proof search. For instance, if the artifact is the factorial function above, an appropriate conjecture might be the following formula, which says that fact always returns a natural number.</p><formula xml:id="formula_8">Theorem fact-is-natp: natp(fact(x)) = T</formula><p>The theorem prover attempts to prove such a conjecture by applying a sequence of transformations to it, replacing each goal (initially, the conjecture) with a list of subgoals. ACL2 provides a hint mechanism that enables the user to instruct the theorem prover on how to proceed with its proof search at any goal or subgoal. For instance, the user can instruct the theorem prover to begin its proof search by inducting on x. Once a theorem is proven, the theorem prover stores it in a database, for use in subsequent derivations. 
This database groups theorems into various rule classes, which affects how the theorem prover will automatically apply them. The default rule class is rewrite, which causes the theorem prover to replace instances of the left-hand-side of an equality with its corresponding right-hand-side. If the conjecture fact-is-natp above is a rewrite rule, then subsequently whenever ACL2 encounters a term of the form natp(fact(τ )) in the course of a proof attempt, it rewrites the term to T.</p><p>ACL2 users interact with the theorem prover primarily by issuing a sequence of event commands for introducing new functions and proving theorems with appropriate rule classes. For example, fact-is-natp is the name of the above theorem event. During proof development the user typically records events in a file, often referred to as a book. Once the desired theorems have been proven, the user instructs ACL2 to certify such a book in order to facilitate the use of the events in other projects. A book can be certified once and then included during a subsequent ACL2 session without rerunning the associated proofs. To facilitate structured proof development, the user is permitted to mark some of the events in a book as local events. For instance, to prove some relevant theorem the user might introduce several auxiliary functions and intermediate lemmas that are not generally useful; such events are typically marked to be local. When a book is included in a subsequent proof project, only the non-local events in the book are accessible, thus preventing unwanted clutter in the database of the theorem prover.</p><p>The presence of local events complicates the soundness claims for ACL2. Note from above that local events in a book might include commands for introducing new functions (thus extending an ACL2 theory with new axioms), which are not available in the subsequent sessions where the book is loaded. 
Yet, in order to prove some nonlocal theorem in the book ACL2 might have used some of these local axioms. One must therefore answer under what condition it is legitimate to mark an axiomatic event in a book as local, and what formal soundness claims can be provided for an ACL2 session in which such a pre-certified book is loaded. Such questions have been answered by Kaufmann and Moore <ref type="bibr" target="#b53">[KM01]</ref>: if a formula φ is proven as a theorem in an ACL2 session, then φ is in fact first-order derivable (with induction) from the axioms of GZ together with (hereditarily) only the axiomatic events in the session that involve the function symbols in φ. (In particular, every ACL2 session corresponds to a theory that is a conservative extension of GZ.) Thus, any definition or theorem that does not involve the function symbols in the non-local events of a book can be marked local. To implement this requirement, book certification involves two passes. In the first pass, ACL2 proves each theorem (and admits each axiomatic event) sequentially. In the second pass, it skips proofs, and makes a so-called local incompatibility check, checking primarily that each axiomatic event involved in any non-local theorem in the book is also non-local.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="2.3">The ACL2 Programming Environment</head><p>ACL2 is closely tied with Common Lisp. The formal syntax of the logic is essentially the syntax of Lisp, and the axioms in GZ for the primitive Lisp functions are carefully crafted so that the return value of a function as predicted by the axioms matches with the value specified in the Common Lisp Manual on arguments in the intended domain of its application. Furthermore, events corresponding to functions introduced using the definitional principle are essentially Lisp definitions. For instance, consider the factorial function fact described above. The formal event introducing the definitional axiom of fact is written in ACL2 as follows.</p><p>(defun fact (n) (if (and (natp n) (&gt; n 0)) (* n (fact (-n 1))) 1))</p><p>This is essentially a Lisp definition of the function! The connection with Common Lisp enables the users of ACL2 to execute formal definitions by using the underlying Lisp evaluator. Since Lisp is an ANSI-standard, efficient functional programming language, ACL2 users often make use of the connection to implement formally defined yet efficient code. Indeed, the theorem prover itself makes use of this connection for simplifying ground terms during proof search; for instance, ACL2 will simplify fact(3) to 6 by evaluation in the underlying Lisp.</p><p>In order to facilitate efficient code development, ACL2 also provides a logic-free programming environment. A user can implement any applicative Lisp function and mark it to be in program mode. No proof obligation is generated for such functions. ACL2 can evaluate such functions using the Lisp evaluator, although no logical guarantee (including termination) is provided. Furthermore, ACL2 provides an interface to the underlying operating system, which enables the user to invoke arbitrary executable code (and operating system commands) from inside an ACL2 session.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="2.4">Evaluators</head><p>ACL2 provides a convenient notation for defining an evaluator for a fixed set of functions. Evaluators are used to support meta reasoning <ref type="bibr" target="#b39">[BM81]</ref>. We will not consider meta reasoning in this paper, but we briefly mention evaluators since they will be useful in characterizing the correctness of external tools.</p><p>A proof search involves applying transformations to reduce a goal to a collection of subgoals. Internally, ACL2 stores each goal as a clause represented as an object in the ACL2 universe. For instance, when ACL2 attempts to prove a theorem of the form τ 1 ∧ τ 2 ∧ . . . ∧ τ n ⇒ τ , it represents the proof goal internally as a list of terms, (¬τ 1 ... ¬τ n τ ), which can be thought of as the disjunction of its elements (literals). When ACL2 works on any subgoal, the transformation procedures work on the internal representation of the subgoal, called the current clause. Since this representation is an ACL2 object, we can define functions over such objects.</p><p>An evaluator makes explicit the connection between terms and their internal representations. Assume that f 1 , . . . , f n are functions axiomatized in some ACL2 theory T . A function ev, also axiomatized in T is called an evaluator for f 1 , . . . , f n , if the axioms associated with ev can be viewed as specifying an evaluation semantics for the internal representation of terms composed of f 1 , . . . , f n that is consistent with the definitions of these functions; such axioms are then referred to as evaluator axioms. A precise characterization of all the evaluator axioms is described in the ACL2 Manual <ref type="bibr" target="#b54">[KM06]</ref> under the documentation topic defevaluator; here we only mention one for illustration, which corresponds to the evaluation of the m-ary function symbol f i : An Evaluator Axiom. ev(list( f i , τ 1 , ..., τ m ), a) = f i (ev( τ 1 , a), . . . 
, ev( τ m , a))</p><p>Here 'f i is assumed to be the internal representation of f i and 'τ j is the internal representation of τ j , for 1 ≤ j ≤ m. It is convenient to think of a as an association list that maps the (internal representation of the) free variables in τ 1 , . . . , τ m to ACL2 objects. Then the axiom specifies that the evaluation of the list ('f i 'τ 1 ... 'τ m ) (which corresponds to the internal representation of f i (τ 1 , . . . , τ m )) under some mapping of free variables to objects is the same as the function f i applied to the evaluation of each τ j under the same mapping.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="3">Verified External Tools</head><p>In this section, we discuss verified external tools. We consider verified tools first since they are amenable to perhaps a simpler understanding than unverified ones. The ideas and infrastructure we develop in this section will be extended successively in the next two sections to support connections with unverified tools. We will refer to external deduction tools as clause processors. Recall that ACL2 internally represents terms as clauses, so that a subgoal of the form τ 0 ∧ τ 1 ∧ . . . ∧ τ n ⇒ τ is represented as a disjunction by the list (¬τ 0 ¬τ 1 ... ¬τ n τ ). Our interface enables the user to transform the current clause with custom code. More precisely, a clause processor is a function that takes a clause C (together with possibly other arguments)  Here *T* and *NIL* are assumed to be the internal representation of T and NIL respectively. The predicate consp is defined in GZ such that consp(x) returns T if x is an ordered pair, and NIL otherwise. and returns a list of clauses L C . <ref type="foot" target="#foot_7">2</ref> The intention is that if each clause in L C is a theorem of the current ACL2 theory then so is C. In the remainder of the paper, when we talk about clause processors, we will mean such clause manipulation functions.</p><p>Our interface for verified external tools constitutes the following components.</p><p>• A new rule class for installing clause processors. Suppose the user has defined a function tool0 that she desires to use as a clause processor. She can then prove a specific theorem about tool0 (described below) and attach this rule class to the theorem. The effect is to install tool0 in the ACL2 database as a clause processor for use in subsequent proof attempts.</p><p>• A new hint for using clause processors. 
Once tool0 has been installed as a clause processor it can be invoked via this hint to transform a conjecture during a subsequent proof attempt. If the user instructs ACL2 to use tool0 to help prove some goal G, then ACL2 transforms G into the collection of subgoals generated by executing tool0 on (the clause representation of) G.</p><p>We now explain the theorem alluded to above for installing a function tool0 as a clause processor. Recall that one way to interpret a formula proven by ACL2 is via an evaluation semantics; that is, a formula Φ is a theorem if, for every substitution σ mapping each free variable of Φ to some object, Φ/σ does not evaluate to NIL. Our formal proof obligation for installing functions as clause processors is based on this evaluation semantics. Let C be a clause whose disjunction is the term τ , and let tool0, with C as its argument, produce the list (C 1 ... C n ) whose respective disjunctions are the terms τ 1 , . . . , τ n . Informally, we want to ensure that if τ /σ evaluates to NIL for some substitution σ then there is some σ and i such that τ i /σ also evaluates to NIL. This condition can be made precise in the logic of ACL2 by extending the notion of evaluators discussed in Section 2.4 from terms to clauses. Before describing the extension, we will assume that the ACL2 ground zero theory GZ contains two functions disjoin and conjoin axiomatized as shown in Figure <ref type="figure" target="#fig_4">1</ref>. Informally, the axioms specify how to interpret objects representing clauses and clause lists. For instance, the function disjoin specifies that the interpretation of a clause (τ 0 τ 1 τ 2 ) is the same as the interpretation of (if</p><formula xml:id="formula_9">τ 0 T (if τ 1 T (if τ 2 T NIL))</formula><p>)), which represents the disjunctions of the terms τ 0 , τ 1 , and τ 2 .</p><p>Based on these axioms, we can formalize the correctness of clause processors by defining an evaluation semantics for clauses. 
In particular, assume that ev is an evaluator for the single function if. Thus ev(list(if, τ 0 , τ 1 , τ 2 ), a) stipulates how the term "if τ 0 then τ 1 else τ 2 " can be evaluated. Here ev is assumed to be an evaluator for if, and args represents the remaining arguments of tool0 (in addition to clause C). The predicates term-listp and alistp are axiomatized in GZ such that (i) term-listp(x) returns a Boolean, which is T if and only if x is an object in the ACL2 universe representing a well-formed list of terms (and hence a clause), and (ii) alistp(a) returns a Boolean, which is T if and only if a is a well-formed association list.</p><p>via the extension principles), a clause processor function tool0(args, C) will be said to be legal in T if there exists a function tool0-env in T such that the formula shown in Figure <ref type="figure" target="#fig_5">2</ref> is a theorem. The function tool0-env returns an association list like σ in our informal example above: it potentially modifies the original association list to respect any generalization being performed by tool0. Note that a weaker theorem would logically suffice, replacing the use of the association list tool0-env(args, c, a) by an existentially quantified variable.</p><p>A theorem of the form shown in Figure <ref type="figure" target="#fig_5">2</ref> can be tagged with the new rule class for clause processors, instructing ACL2 to use the function tool0 as a new verified external tool. Theorem 1 below, based on the "Essay on Correctness of Meta Reasoning" comment in the ACL2 sources, guarantees that the above condition is sufficient for the soundness of using tool0 to transform goal conjectures.</p><p>Theorem 1 Let T be an ACL2 theory for which tool0 is a legal clause processor, and let tool0 return a list L C of clauses given an input clause C. 
If each clause in L C is provable in T , then C is also provable in T .</p><p>Proof: The theorem is a simple consequence of the following lemma, given the correctness condition shown in Figure <ref type="figure" target="#fig_5">2</ref>.</p><p>Lemma 1 Let τ be a term with free variables v 0 , . . . , v n , ev an evaluator for the function symbols in τ , and e a list of cons pairs of the form ( 'v0, 'τ 0 ... 'vn, 'τ n ), where 'v i and 'τ i are internal representation of v i and τ i respectively. Let σ be a substitution mapping each v i to τ i , and let 'τ be the internal representation of the term τ . Then the following formula is a theorem: ev( τ , e) = τ /σ.</p><p>Proof: An easy induction on the structure of term τ .</p><p>The simplicity of the above proof might belie some of the subtleties involved. For instance, recall that each ACL2 theory T is a conservative extension of GZ. Furthermore, note that theorems whose proofs use an invocation of tool0 often do not involve the function symbols occurring in the definition of the function tool0 itself. For instance, assume that tool0 is a simple clause generalizer that replaces each occurrence of a specific subterm in a clause by a free variable not present in the original clause. Such a function can be invoked for generalization in the proof of a formula Φ although Φ might not contain any occurrence of tool0. On the completion of a successful proof of Φ, can we then mark tool0 as local? The answer is in general "no", since Theorem 1 only guarantees provability of the clause input to the clause processor from those returned in the theory in which the clause processor is legal. In particular such a theory must contain the definitions of the function symbols being manipulated by tool0, and for this it suffices that tool0 not be marked local. 
In fact a soundness bug in a previous but very recent release of ACL2 occurred in an analogous context for meta rules, due to ACL2's previous inability to track the fact that the theory in which such rules are applied indeed included the definitions supporting the corresponding evaluators.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="4">Basic Unverified External Tools</head><p>Verified clause processors are useful when the user intends to augment the reasoning engine of ACL2 with mechanically checked code for customized clause manipulation. However, more often, we want to manipulate goal conjectures using a tool that is external to the theorem prover, for instance a state-of-the-art Boolean satisfiability solver or model checker. In this section, we will consider an extension of the mechanisms to incorporate such tools. In the next section we will present additional constructs to facilitate integration with more general tools.</p><p>Our interface for unverified tools involves extending the theorem prover with a new event that enables ACL2 to recognize some function tool1 defined by the user as an unverified clause processor. Here the function tool1 might be implemented using program mode and might also invoke arbitrary executable code using ACL2's system call interface (cf. Section 2.3). The effect of the event in subsequent proof search with ACL2 is the same as if tool1 were introduced as a verified clause processor: hints can be used to invoke the function for manipulating terms arising during proofs.</p><p>Suppose an unverified tool tool1 simplifies a clause in the course of proving some goal conjecture. What guarantees should an implementor of tool1 provide (and must the user trust) in order to claim that the goal conjecture is indeed a theorem? In this simple case, a sufficient guarantee is that there is a theory T containing the definition of tool1 and appropriate evaluators such that the formula analogous to the one shown in Figure <ref type="figure" target="#fig_5">2</ref> in the previous section for tool1 is a theorem of T . 
The soundness of the use of tool1 then follows from Theorem 1.</p><p>Since the invocation of an unverified tool for simplifying ACL2 conjectures carries a logical burden, the event introducing such tools provides two constructs, namely (i) a tag for the user of the tool to acknowledge this burden, and (ii) a concept of supporters for the tool developer to implement the tool in a way as to be able to guarantee that the logical restrictions are met. We now explain these two constructs.</p><p>The tag associated with an event installing an unverified tool tool1 is a symbol (the default value being the name of the tool itself), which must be used to acknowledge that the soundness of any theorem proven by an application of tool1 depends on the implementor of tool1 satisfying the logical guarantees above. The certification of any book that contains an event installing an unverified clause processor (or hereditarily includes such a book, even locally) requires the user to tag the certification command with the name of the tags introduced with the event. Note that technically the mere act of installing an unverified tool does not introduce any unsoundness; the logical burden expressed above pertains to the use of the tool. Nevertheless, our decision to insist that the certification of any book with an installation of an unverified tool (whether subsequently used or not) to be tagged is governed by implementation convenience. Recall that the local incompatibility check (that is, the second pass of a book certification) skips proofs, and thereby ignores the hints provided during the proof process. By "tracking" the installation rather than the application of an unverified clause processor, we disallow the possibility of a user certifying a book that locally introduces an unverified tool and uses it for simplifying some formulas, without acknowledging the application of the tool.</p><p>Finally we turn to supporters. 
This construct enables a tool developer to provide the guarantee outlined above in the presence of local events. To understand why this construct is necessary, consider the following scenario. Suppose a developer creates a book (say, book1) in which the function f is introduced locally with the following definitional axiom:</p><formula xml:id="formula_10">Local Definitional Axiom. f (x) = x</formula><p>Suppose further that book1 also installs an unverified clause processor tool1. Assume that the definition of tool1 does not involve invocation of f , but it replaces terms of the form f (τ ) with τ ; thus the correctness of tool1 depends on the intended definition of f . However, if an ACL2 session is extended by including book1, then the extended session contains the definition of tool1 tagged as an unverified clause processor, but does not contain the (local) definition of f . Thus we can write another book (say, book2) that includes book1 and then provides a new definition of f , for instance the following:</p><formula xml:id="formula_11">Definitional Axiom. f (x) = cons(x, x)</formula><p>We are now working in a theory in which tool1 may be used to perform term manipulations that are completely unjustified by the current definition of f , thus invalidating any guarantee provided by the implementor of tool1.</p><p>In general, then, suppose that a tool has built-in knowledge about some function symbols. The tool implementor cannot meet the logical burden expressed above unless the user of the tool is required to include the axioms that have been introduced for those function symbols. The supporters construct of the event installing unverified clause processors provides a way for the implementor to insist that such axioms are present, by listing the names of axiomatic events (typically function symbols that name their definitions, e.g., f in the example above). We will refer to these events as the supporting events for the clause processor. 
Whenever ACL2 encounters an event installing a function tool1 as an unverified clause processor with a non-empty list of supporters, it will check that tool1 and all of the supporting event names are already defined.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="5">Templates and Generalized External Tools</head><p>The view above of unverified tools is that if a clause processor replaces some clause with a list of clauses then the provability of the resulting clauses implies the provability of the original clause. A clause processor is thus an efficient procedure for assisting in proofs of theorems that could, in principle, have been proven from the axiomatic events of the current theory. This simple view is sufficient in most situations; for instance, one can use it to connect ACL2 with a Boolean satisfiability solver that checks if a propositional formula is a tautology. However, some ACL2 users have found the necessity to use more sophisticated tools that implement their own theory. We will now discuss an extension to the ACL2 logic that facilitates connection with such tools.</p><p>To motivate the need for such tools, assume that we wish to prove a theorem about some hardware design. Most such designs are written in a Hardware Description Language (HDL) such as VHDL or Verilog. One way of formalizing such designs is to define a semantics of the HDL in ACL2, possibly by defining a formal interpreter for the language. However, defining such an interpreter is typically extremely complex and labor-intensive. On the other hand, there are several model checkers available which can parse designs written in VHDL or Verilog. An alternative is merely to constrain some properties of the interpreter and use a combination of theorem proving and model checking in the following manner:</p><p>• Establish low-level properties of parts of a design using model checkers or other decision procedures.</p><p>• Use the theorem prover to compose the properties proven by the model checker together with the constrained properties of the interpreter to establish the correctness of the design.</p><p>The above approach has shown promise in scaling formal verification to industrial designs. 
For instance, Sawada and Reeber <ref type="bibr" target="#b71">[SR06]</ref> have recently verified an industrial VHDL floating-point multiplier using a combination of ACL2 and an IBM internal verification tool called SixthSense [MBP + 04]. They introduce two functions, sigbit and sigvec, with the following assumed semantics:</p><p>• sigbit(e, s, n, p) returns a bit corresponding to the value of bit signal s of a VHDL design e at cycle n and phase p.</p><p>• sigvec(e, s, l, h, n, p) returns a bit vector corresponding to the bit-range between l and h of s for design e at cycle n and phase p.</p><p>In ACL2 these two functions are constrained only to return a bit and bit-vector respectively. The key properties of the different multiplier stages are proven using SixthSense.</p><p>For instance, one of the properties proven is that sigvec when applied to (i) a constant C representing the multiplier design, (ii) a specific signal s of the design, (iii) two specific values lb and hb corresponding to the bit-width of s, and (iv) a specific cycle and phase, returns the sum of two other bit vectors at the previous cycle; this corresponds to one stage of the Wallace-tree decomposition implemented by the multiplier. All such theorems are then composed by ACL2 to verify that the multiplier, when provided two vectors of the right size, produces their product after 5 cycles. How do we support this verification approach? Note that the property above is not provable from the constraints on the associated functions alone (namely sigvec returns a bit vector). Thus if we use encapsulation to constrain sigvec and posit the property as a theorem then functional instantiation can derive an inconsistency. 
The problem is that the property is provable from the constraints together with axioms about sigvec that are unknown to ACL2 but assumed to be accessible to SixthSense.</p><p>Our solution to the above is to extend the extension principles of ACL2 with a new principle called encapsulation templates (or simply templates). Function symbols introduced via templates are constrained functions just like those introduced via the encapsulation principle, and the soundness of extending an ACL2 theory is analogously guaranteed by exhibiting a local witness satisfying the constraints. However, there is one significant distinction between encapsulation principle and templates: the constraints introduced are marked incomplete, acknowledging that they might not encompass all the constraints on the functions. ACL2 therefore disallows functional instantiation of theorems by substituting for functions introduced via templates.</p><p>The use of template events facilitates integration of ACL2 with tools like SixthSense above. Suppose that we wish to connect ACL2 with an unverified tool tool1 that implements a theory that we do not wish to define explicitly in ACL2. We then use a template event to introduce the function symbols (say f and g) regarding which the theory of the clause processor contains additional axioms. Finally we introduce tool1 as an unverified clause processor, marking f and g as supporting events.</p><p>We now explain the logical burden for the developer of such a connection. Assume that an ACL2 theory T is extended by a template event E, and suppose that the supporting events for tool1 mention some function introduced by E. Then the developer of tool1 must guarantee that it is possible, in principle, to introduce f and g via the encapsulation principle (which we will refer to as the "promised" encapsulation E P of the functions) such that the following conditions hold:</p><p>1. The constraints in E P include the constraints in E.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="2.">E P does not introduce any additional function symbols other than those intro-</head><p>duced by E.</p><p>3. E P is admissible in theory T .</p><p>4. For any extension T′ of T together with the constraints in E P , if one can invoke tool1 to reduce some clause C to a list of clauses L C then if each clause of L C is first-order provable (with induction) in T′ then C must be provable in T′.</p><p>Furthermore, in order to make logical guarantees regarding ACL2 sessions that contain events corresponding to several unverified external tools, ACL2 enforces the following "disjointness condition": a template event may not be extended to a promised encapsulate by two different clause processors. Thus, when an unverified clause processor installation event has a supporting event name, f , such that f is a function symbol that had been introduced by a template, it is required that no unverified clause processor has been previously installed in the current ACL2 session that has a supporting event name that is a function symbol introduced in the same template. This makes it possible to view the event as essentially the (unique) promised encapsulation whose existence is guaranteed by the implementor of the tool. Note that condition 2 above is necessary for this purpose to preclude the possibility that the theory implemented by different external tools might have conflicting implicit axioms in their promised encapsulations for function symbols not introduced by the template.</p><p>With these conditions, we can make the following informal claim for an ACL2 session which includes templates together with the use of unverified clause processors:</p><p>Perform the following transformation in sequence to each template event E in the session. If there is a tool tool1 whose supporting events mention a function symbol introduced by E then replace E with the encapsulation E P promised by the developer of tool1. 
Otherwise extend E to an arbitrary admissible encapsulate. (Note that at least one such extension exists, namely one in which no additional constraint is introduced.) Then every alleged theorem in the session is in fact derivable in first-order logic (with induction) from the axiomatic events in the session produced after this transformation.</p><p>The informal claim above can be made precise by formalizing the notion of an ACL2 session. Kaufmann and Moore <ref type="bibr" target="#b53">[KM01]</ref> describe such a formalization where a valid session is modeled as a chronology, inductively defined as a sequence of events that is either (i) the empty sequence, or (ii) constructed from a sequence by introducing one of the legal ACL2 events such as commands for introducing new functions, and proving theorems. We omit that description here and refer the reader to their paper <ref type="bibr" target="#b53">[KM01]</ref> for details. For this paper, we point out that given a careful inductive characterization of a session as a chronology, it is easy to see that an ACL2 session transformed as above really corresponds to a chronology. The basic observation is that for any chronology in which no function introduced by an encapsulation is functionally instantiated, the encapsulation may be strengthened and the result is still a chronology. The proof is by induction on the formation of chronologies, and each proof obligation encountered in the inductive step is discharged against the possibly stronger theory.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="6">Related Work</head><p>The importance of allowing the hooking up of external tools has been widely recognized in the theorem proving community. Some early ideas for connecting different theorem provers are discussed in a proposal for so-called "interface logics" <ref type="bibr" target="#b48">[Gut91]</ref>, with the goal to connect automated reasoning tools by defining a single logic L such that the logics of the individual tools can be viewed as sub-logics of L. More recently, with the success of model checkers and Boolean satisfiability solvers, there has been significant work connecting such tools with interactive theorem proving. The PVS theorem prover provides connections with several decision procedures such as model checkers and SAT solvers <ref type="bibr" target="#b67">[RSS95,</ref><ref type="bibr">Sha01]</ref>. The Isabelle theorem prover <ref type="bibr">[Pau]</ref> uses unverified external tools as oracles for checking formulas as theorems during a proof search; this mechanism has been used to integrate model checkers and arithmetic decision procedures with Isabelle <ref type="bibr" target="#b58">[MN95,</ref><ref type="bibr" target="#b37">BF00]</ref>. Oracles are also used in the HOL family of higher order logic theorem provers <ref type="bibr">[GM93]</ref>; for instance, the PROSPER project [DCN + 00] uses the HOL98 theorem prover as a uniform and logically-based coordination mechanism between several verification tools. 
The most recent incarnation of this family of theorem provers, HOL4, uses an external oracle interface to decide large Boolean formulas through connections to state-of-the-art BDD and SAT-solving libraries <ref type="bibr" target="#b45">[Gor02]</ref>, and also uses that oracle interface to connect HOL4 with ACL2 as discussed in the next section.</p><p>The primary basis for interfacing external tools with theorem provers for higherorder logic (specifically HOL and Isabelle) involves the concept of "theorem tagging", introduced by Gunter for HOL90 <ref type="bibr" target="#b47">[Gun98]</ref>. The idea is to introduce a tag in the logic for each oracle and view a theorem certified by the oracle as an implication with the tag corresponding to the certifying oracle as a hypothesis. This approach enables tracking of dependencies on unverified tools at the level of individual theorems. In contrast, our approach is designed to track such dependencies at the level of files, that is, ACL2 books. Our coarser level of tracking is at first glance unfortunate: if a book contains some events that depend on such tools and others that do not, then the entire book is "tainted" in the sense that its certification requires an appropriate acknowledgement for the tools. We believe that this will not prove to be an issue in practice, as ACL2 users typically find it easy to move events between books. On the positive side, it is simpler to track a single event introducing an external tool rather than uses of such an event, especially since hints are ignored when including previously certified books.</p><p>As an aside, we note that a very general tagging mechanism is under development for ACL2, serving as a foundation in particular for tagging of unverified clause processors.</p><p>There has also been work on using an external tool to search for a proof that can then be checked by the theorem prover without assistance from the tool. 
Hurd <ref type="bibr">[Hur02]</ref> describes such an interface connecting HOL with first-order logic. McCune and Shumsky [MS00] present a system called Ivy which uses Otter to search for first-order proofs of equational theories and then invokes ACL2 to check such proof objects. Meng and Paulson <ref type="bibr">[MP04]</ref> interface Isabelle with a resolution theorem prover.</p><p>Several ACL2 users have integrated external tools with ACL2; but without the disciplined mechanisms of this paper, such integration has essentially involved implementation hacks on the ACL2 source code. <ref type="bibr">Ray</ref> </p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="7">Conclusion and Future Work</head><p>Different deduction tools bring in different capabilities to formal verification. A strength of general purpose theorem provers compared to many tools based on decision procedures is in the expressive power of the logic, which enables succinct definitions. Automatic decision procedures provide more automated proof procedures for decidable theories. Several ACL2 users have requested ways to connect ACL2 with automated decision procedures. We believe that the mechanisms described in this paper will provide a disciplined way of using ACL2 with other tools with a clear specification of the expectations from the tool in order to guarantee soundness of the ACL2 session. Furthermore, we believe that verified clause processors will provide a way for the user to control a proof more effectively without relying on ACL2's heuristics.</p><p>We have presented an approach to connecting ACL2 with external deduction tools, but we have merely scratched the surface. It is well-known that developing an effective interface between two or more deduction tools is a complicated exercise <ref type="bibr" target="#b50">[KM92]</ref>. It remains to be seen how to effectively decompose theorem proving problems so as to make effective use of clause processors to provide the requisite automation.</p><p>Some researchers have criticized our interface on the grounds that developing a connection with an external tool requires significant knowledge of the ACL2 logic. While we acknowledge that our interface requires understanding of that logic, including the term representation, we believe that such requirement is necessary for any developer interested in developing connections between formal tools. 
A connection between different formal tools must involve a connection between two logics, and the builder of such connection must understand both the logics, including the legal syntax of terms, and the axioms and rules of inferences. It should be noted that the logic of ACL2 is perhaps more complex than many others, principally because it offers proof structuring mechanisms by enabling the user to mark events as local. This complexity manifests itself in the interface; constructs such as supporters are provided essentially to enable the tool implementor to provide logical guarantees in the presence of local events. However, we believe that with these constructs it will be possible for the tool developers to implement connections with ACL2 with reasonable understanding of the theorem prover.</p><p>Finally, note that the restrictions for the tool developers that we have outlined preclude certain external deduction tools. For instance, there has been recent work connecting HOL with ACL2 [GHKR06a, GHKR06b]; the approach there has been for a HOL user to make use of ACL2's proof automation and fast execution capabilities. It might be of interest to the ACL2 user to take advantage of HOL's expressive power as well. We are working on extending the logical foundations of ACL2 to facilitate such a connection. The key idea is that the ACL2 theorem prover might be viewed as a theorem prover for the HOL logic. If the view is accurate then it will be possible for the user of ACL2 to prove some formulas in HOL and use them in an ACL2 session, claiming that the session essentially reflects a HOL session mirrored in ACL2.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head>Efficiently Checking Propositional Resolution Proofs in Isabelle/HOL 1 Introduction</head><p>Interactive theorem provers like PVS <ref type="bibr" target="#b100">[ORS92]</ref>, HOL <ref type="bibr">[GM93]</ref> or Isabelle <ref type="bibr" target="#b101">[Pau94]</ref> traditionally support rich specification logics. Proof search and automation for these logics however is difficult, and proving a non-trivial theorem usually requires manual guidance by an expert user. Automated theorem provers on the other hand, while often designed for simpler logics, have become increasingly powerful over the past few years. New algorithms, improved heuristics and faster hardware allow interesting theorems to be proved with little or no human interaction, sometimes within seconds. By integrating automated provers with interactive systems, we can preserve the richness of our specification logic and at the same time increase the degree of automation <ref type="bibr">[Sha01]</ref>. This is an idea that goes back at least to the early nineties <ref type="bibr" target="#b89">[KKS91]</ref>. However, to ensure that a potential bug in the automated prover does not render the whole system unsound, theorems in Isabelle, like in other LCF-style <ref type="bibr" target="#b83">[Gor00]</ref> provers, can be derived only through a fixed set of core inference rules. Therefore it is not sufficient for the automated prover to return whether a formula is provable, but it must also generate the actual proof, expressed (or expressable) in terms of the interactive system's inference rules.</p><p>Formal verification is an important application area of interactive theorem proving. Problems in verification can often be reduced to Boolean satisfiability (SAT), and recent SAT solver advances have made this approach feasible in practice. Hence the performance of an interactive prover on propositional problems may be of significant practical importance. 
In this paper we describe the integration of zChaff [MMZ + 01] and MiniSat <ref type="bibr" target="#b79">[ES04]</ref>, two leading SAT solvers, with the Isabelle/HOL <ref type="bibr" target="#b98">[NPW02]</ref> prover.</p><p>We have shown earlier <ref type="bibr" target="#b108">[Web05a,</ref><ref type="bibr" target="#b109">Web05b]</ref> that using a SAT solver to prove theorems of propositional logic dramatically improves Isabelle's performance on this class of formulae, even when a rather naive (and unfortunately, as we will see in Section 3, inefficient) representation of propositional problems is used. Furthermore, while Isabelle's previous decision procedures simply fail on unprovable conjectures, SAT solvers are able to produce concrete counterexamples. In this paper we focus on recent improvements of the proof reconstruction algorithm in Isabelle/HOL, which cause a speedup by several orders of magnitude. In particular the representation of the propositional problem turns out to be crucial for performance. While the implementation in <ref type="bibr" target="#b108">[Web05a]</ref> was still limited to relatively small SAT problems, the recent improvements now allow to check proofs with millions of resolution steps in reasonable time. This shows that, somewhat contrary to common belief, efficient proof checking in an LCF-style system is feasible.</p><p>The next section describes the integration of zChaff and MiniSat with Isabelle/HOL in more detail. In Section 3 we evaluate the performance of our approach, and report on experimental results. Related work is discussed in Section 4. Section 5 concludes this paper with some final remarks and points out directions for future research.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="2">System Description</head><p>To prove a propositional tautology φ in the Isabelle/HOL system with the help of zChaff or MiniSat, we proceed in several steps. First φ is negated, and the negation is converted into an equivalent formula φ * in conjunctive normal form. φ * is then written to a file in DIMACS CNF format <ref type="bibr" target="#b77">[DIM93]</ref>, the standard input format supported by most SAT solvers. zChaff and MiniSat, when run on this file, return either "unsatisfiable", or a satisfying assignment for φ * .</p><p>In the latter case, the satisfying assignment is displayed to the user. The assignment constitutes a counterexample to the original (unnegated) conjecture. When the solver returns "unsatisfiable" however, things are more complicated. If we have confidence in the SAT solver, we can simply trust its result and accept φ as a theorem in Isabelle. The theorem is tagged with an "oracle" flag to indicate that it was proved not through Isabelle's own inference rules, but by an external tool. In this scenario, a bug in the SAT solver could potentially allow us to derive inconsistent theorems in Isabelle/HOL.</p><p>The LCF-approach instead demands that we verify the solver's claim of unsatisfiability within Isabelle/HOL. While this is not as simple as the validation of a satisfying assignment, the increasing complexity of SAT solvers has before raised the question of support for independent verification of their results, and in 2003 zChaff has been extended by L. Zhang and S. Malik <ref type="bibr" target="#b110">[ZM03]</ref> to generate resolution-style proofs that can be verified by an independent checker. (This issue has also been acknowledged by the annual SAT Competition, which has introduced a special track on certified unsat answers in 2005.) 
More recently, a proof-logging version of MiniSat has been released <ref type="bibr" target="#b80">[ES06]</ref>, and John Matthews has extended this version to produce human-readable proofs that are easy to parse <ref type="bibr" target="#b90">[Mat06]</ref>, similar to those produced by zChaff. Hence our main task boils down to using Isabelle/HOL as an independent checker for the resolution proofs. Both solvers store their proof in a text file that is read in by Isabelle, and the individual resolution steps are replayed in Isabelle/HOL. Section 2.1 briefly describes the necessary preprocessing of the input formula, and details of the proof reconstruction are explained in Section 2.2. The overall system architecture is shown in Figure <ref type="figure" target="#fig_4">1</ref>.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="2.1">Preprocessing</head><p>Isabelle/HOL offers higher-order logic (on top of Isabelle's meta logic), whereas most SAT solvers only support formulae of propositional logic in conjunctive normal form (CNF). Therefore the (negated) input formula φ must be preprocessed before it can be passed to the solver.</p><p>Two different CNF conversions are currently implemented: a naive encoding that may cause an exponential blowup of the formula, and a Tseitin-style encoding <ref type="bibr" target="#b106">[Tse83]</ref> that may introduce (existentially quantified) auxiliary Boolean variables, cf. <ref type="bibr" target="#b84">[Gor01]</ref>. The technical details can be found in <ref type="bibr" target="#b108">[Web05a]</ref>. More sophisticated CNF conversions, e.g. from <ref type="bibr" target="#b99">[NRW98]</ref>, could be implemented as well. The main focus of our work however is on efficient proof reconstruction, less on transformations of the input formula: the benchmark problems used for evaluation in Section 3 are already given in CNF anyway.</p><p>Note that it is not sufficient to convert φ into an equivalent formula φ * in CNF. Rather, we have to prove this equivalence inside Isabelle/HOL. The result is not a single formula, but a theorem of the form φ = φ * . The fact that our CNF transformation must be proof-producing leaves some potential for optimization. One could implement a non proof-producing (and therefore much faster) version of the same CNF transformation, and use it for preprocessing instead. Application of the proof-producing version would then be necessary only if the SAT solver has in fact shown a formula to be unsatisfiable. The total runtime on provable formulae would increase slightly, as the CNF transformation needed to be done twice -first without, later with proofs. 
Preprocessing times for unprovable formulae however should improve.</p><p>φ * is written to a file in DIMACS CNF format, and the SAT solver is invoked on this input file.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="2.2">Proof Reconstruction</head><p>When zChaff and MiniSat return "unsatisfiable", they also generate a resolution-style proof of unsatisfiability and store the proof in a text file <ref type="bibr" target="#b110">[ZM03,</ref><ref type="bibr" target="#b90">Mat06]</ref>. While the precise format of this file differs between the solvers, the essential proof technique is the same. Both SAT solvers use propositional resolution to derive new clauses from existing ones:</p><formula xml:id="formula_12">P ∨ x Q ∨ ¬x P ∨ Q</formula><p>It is well-known that this single inference rule is sound and complete for propositional logic. A set of clauses is unsatisfiable iff the empty clause is derivable via resolution. (For the purpose of proof reconstruction, we are only interested in the proof returned by the SAT solver, not in the techniques and heuristics that the solver uses internally to find this proof. Therefore the integration of zChaff and MiniSat is quite similar — minor differences in their proof trace format aside — and further SAT solvers capable of generating resolution-style proofs could be integrated with Isabelle in the exact same manner.)</p><p>We assign a unique identifier — simply a non-negative integer, starting with 0 — to each clause of the original CNF formula. Further clauses derived by resolution are assigned identifiers by the solver. Often we are not interested in the clause obtained by resolving just two existing clauses, but only in the result of a whole resolution chain, where two clauses are resolved, the result is resolved with yet another clause, and so on. 
Consequently, we define an ML <ref type="bibr" target="#b97">[MTHM97]</ref> type of propositional resolution proofs as a pair whose first component is a table mapping integers (to be interpreted as the identifiers of clauses derived by resolution) to lists of integers (to be interpreted as the identifiers of previously derived clauses that are part of the defining resolution chain). The second component of the proof is just the identifier of the empty clause.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head>type proof = int list Inttab.table * int</head><p>This type is merely intended as an internal format to store the information contained in a resolution proof. There are many restrictions on valid proofs that are not enforced by this type. For example, it does not ensure that its second component indeed denotes the empty clause, that every resolution step is legal, or that there are no circular dependencies between derived clauses. It is only important that every resolution proof can be represented as a value of type proof, not conversely. The proof returned by zChaff or MiniSat is translated into this internal format, and passed to the actual proof reconstruction algorithm. This algorithm will either generate an Isabelle/HOL theorem, or fail in case the proof is invalid (which should not happen unless the SAT solver contains a bug).</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="2.2.1">zChaff 's Proof Trace Format</head><p>The format of the proof trace generated by zChaff has not been documented before. Therefore a detailed description of the format and its interpretation, although not the main focus of this paper, seems in order.</p><p>The proof file generated by zChaff consists of three sections, the first two of which are optional (but present in any non-trivial proof). The first section defines clauses derived from the original problem by resolution. A typical line would be "CL: 7 &lt;= 2 3 0", meaning that a new clause was derived by resolving clauses 2 and 3, and resolving the result with clause 0. In this example, the new clause is assigned the identifier 7, which may then be used in further lines of the proof file. Clauses of the original CNF formula are implicitly assigned identifiers starting from 0, in the order they are given in the DIMACS file. When converting zChaff's proof into our internal format, the clause identifiers in a CL line can immediately be added to the table which constitutes the proof's first component, with the new identifier as the key, and the list of resolvents as the associated value.</p><p>The second section of the proof file records variable assignments that are implied by the first section, and by other variable assignments. As an example, consider "VAR: 3 L: 2 V: 0 A: 1 Lits: 4 7". This line states that variable 3 must be false (i.e. its value must be 0; zChaff uses "V: 1" for variables that must be true) at decision level 2, the antecedent being clause 1. The antecedent is a clause in which every literal except for the one containing the assigned variable must evaluate to false because of variable assignments at lower decision levels (or because the antecedent is already a unit clause). The antecedent's literals are given explicitly by zChaff, using an encoding that multiplies each variable by 2 and adds 1 for negative literals. 
Hence "Lits: 4 7" corresponds to the clause x 2 ∨ ¬x 3 . Our internal proof format does not allow to record variable assignments directly, but we can translate them by observing that they correspond to unit clauses. For each variable assignment in zChaff's trace, a new clause identifier is generated (using the number of clauses derived in the first section as a basis, and the variable itself as offset) and added as a key to the proof's table. The associated list of resolvents contains the antecedent, and is otherwise obtained from the explicitly given literals: for each literal's variable (except for the one that is being assigned), a similar unit clause must have been added to the table before; its identifier computed according to the same formula. We ignore both the value and the level information in zChaff's trace. The former is implicit in the derived unit clause (which contains the variable either positively or negatively), and the latter is implicit in the overall proof structure.</p><p>The last section of the proof file consists of just one line which specifies the conflict clause, a clause which has only false literals: e.g. "CONF: 3 == 4 6". Literals are encoded in the same way as in the second section, so clause 3 would be x 2 ∨ x 3 in this case. We translate this line into our internal proof format by generating a new clause identifier i which is added to the proof's table, with the conflict clause itself and the unit clause for each of the conflict clause's variables as associated resolvents. Finally, we set the proof's second component to i.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="2.2.2">MiniSat's Proof Trace Format</head><p>The proof-logging version of MiniSat originally generated proof traces in a rather compact (and again undocumented) binary format, for which we have not implemented a parser. John Matthews <ref type="bibr" target="#b90">[Mat06]</ref> however has extended this version with the ability to produce readable proof traces in ASCII format, similar to those produced by zChaff. We describe the precise proof trace format, and its translation into our internal proof format.</p><p>MiniSat's proof traces, unlike zChaff's, are not divided into sections. They contain four different types of statements: "R" to reference original clauses, "C" for clauses derived via resolution, "D" to delete clauses that are not needed anymore, and "X" to indicate the end of proof. Aside from "X", which must appear exactly once and at the end of the proof trace, the other statements may appear in any number and (almost) any order.</p><p>MiniSat does not implicitly assign identifiers to clauses in the original CNF formula. Instead, "R" statements, e.g. "R 0 &lt;= -1 3 4", are used to establish clause identifiers. This particular line introduces a clause identifier 0 for the clause ¬x 1 ∨x 3 ∨x 4 , which must have been one of the original clauses in this example. (Note that MiniSat, unlike zChaff, uses the DIMACS encoding of literals in its proof trace.) Since our internal proof format uses different identifiers for the original clauses, the translation of MiniSat's proof trace into the internal format becomes parameterized by a renaming R of clause identifiers. An "R" statement does not affect the proof itself, but it extends the renaming. 
The given literals are used to look up the identifier of the corresponding original clause, and the clause identifier introduced by the "R" statement is mapped to the clause's original (internal) identifier.</p><p>New clauses are derived from existing clauses via resolution chains. A typical line would be "C 7 &lt;= 2 5 3 4 0", meaning that a new clause with identifier 7 was derived by resolving clauses 2 and 3 (with x 5 as the pivot variable), and resolving the result with clause 0 (with x 4 as the pivot variable). In zChaff's notation, this would correspond to "CL: 7 &lt;= 2 3 0". We add this line to the proof's table just like for zChaff, but with one difference: MiniSat's clause identifiers cannot be used directly. Instead, we generate a new internal clause identifier for this line, extend the renaming R by mapping MiniSat's clause identifier (7 in this example) to the newly generated identifier, and apply R to the identifiers of resolvents as well.</p><p>Clauses that are not needed anymore can be indicated by a "D" statement, followed by a clause identifier. Currently such statements are ignored. Making beneficial use of them would require not only a modified proof format, but also a different algorithm for proof reconstruction.</p><p>Finally a line like "X 0 42" indicates the end of proof. The numbers are the minimum and maximum, respectively, identifiers of clauses used in the proof. We ignore the first identifier (which is usually 0 anyway), and use the second identifier, mapped from MiniSat's identifier scheme to our internal one by applying R, as the identifier of the empty clause, i.e. as the proof's second component.</p><p>There is one significant difference between MiniSat's and zChaff's proof traces that should have become apparent from the foregoing description. MiniSat, unlike zChaff, records the pivot variable for each resolution step in its trace, i.e. 
the variable that occurs positively in one clause partaking in the resolution, and negatively in the other. This information is redundant, as the pivot variable can always be determined from those two clauses: If two clauses were to be resolved that share more than one variable occurring positively in one clause and negatively in the other, the resulting clause would be tautological, i.e. contain a variable and its negation. Both zChaff and MiniSat are smart enough not to derive such tautological clauses in the first place. We have decided to ignore the pivot information in MiniSat's traces, since proof reconstruction for zChaff requires the pivot variable to be determined anyway, and using MiniSat's pivot data would need a modified internal proof format. This however leaves some potential for optimization wrt. replaying MiniSat proofs.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="2.2.3">Proof Reconstruction</head><p>We now come to the core of this paper. The task of proof reconstruction is to derive False from the original clauses, using information from a value of type proof (which represents a resolution proof found by a SAT solver). This can be done in various ways. In particular the precise representation of the problem as an Isabelle/HOL theorem (or a collection of Isabelle/HOL theorems) turns out to be crucial for performance.</p><p>Naive HOL Representation In an early implementation <ref type="bibr" target="#b108">[Web05a]</ref>, the whole problem was represented as a single theorem (φ * =⇒ False) =⇒ (φ * =⇒ False), where φ * was completely encoded in HOL as a conjunction of disjunctions. Step by step, this theorem was then modified to reduce the antecedent φ * =⇒ False to True, which would eventually prove φ * =⇒ False.</p><p>This was extremely inefficient for two reasons. First, every resolution step required manipulation of the whole (possibly huge) problem at once. Second, and just as important, SAT solvers treat clauses as sets of literals, making implicit use of associativity, commutativity and idempotence of disjunction. Likewise, CNF formulae are treated as sets of clauses, making implicit use of the same properties for conjunction. The encoding in HOL however required numerous explicit rewrites (with theorems like (P ∨ Q) = (Q ∨ P )) to reorder clauses and literals before each resolution step.</p><p>Separate Clauses Representation A better representation of the CNF formula was discussed in [FMM + 06]. In order to understand it, we need to look at the ML datatype of theorems that Isabelle uses internally. Every theorem encodes a sequent Γ φ, where φ is a single formula, and Γ is a finite set of formulae (implemented as an ordered list of terms, although this detail doesn't matter to us). 
The intended interpretation is that φ holds when every formula in Γ is assumed as a hypothesis. So far we have only considered theorems where Γ = ∅, written φ for short. This was motivated by the normal user-level view on theorems in Isabelle, where assumptions are encoded using implications =⇒ , rather than hypotheses. Isabelle's inference kernel however provides rules that let us convert between hypotheses and implications as we like. Resolving two clauses Γ [[p 1 ; . . . ; p n ]] =⇒ False and Γ′ [[q 1 ; . . . ; q m ]] =⇒ False, where ¬p i = q j for some i and j, essentially becomes an application of the cut rule. The first clause is rewritten to Γ [[p 1 ; . . . ; p i−1 ; p i+1 ; . . . ; p n ]] =⇒ ¬p i . A derived Isabelle tactic then performs the cut to obtain Γ ∪ Γ′ [[q 1 ; . . . ; q j−1 ; p 1 ; . . . ; p i−1 ; p i+1 ; . . . ; p n ; q j+1 ; . . . ; q m ]] =⇒ False from the two clauses. Note that this representation, while breaking apart the given clauses into separate theorems, allows us to view the CNF formula as a set of clauses, still does not allow us to view each individual clause as a set of literals. Some reordering of literals is necessary before cuts can be performed, and after each cut, duplicate literals have to be removed from the result.</p><formula xml:id="formula_13">{φ} φ Assume Γ ψ Γ \ φ φ =⇒ ψ impI Γ φ =⇒ ψ Γ′ φ Γ ∪ Γ′ ψ impE</formula></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head>Sequent Representation</head><p>We can further exploit the fact that Isabelle's inference kernel treats a theorem's hypotheses as a set of formulae, by encoding each clause using hypotheses only. Consider the following representation of a clause p 1 ∨ . . . ∨ p n as an Isabelle/HOL theorem:</p><formula xml:id="formula_14">{p 1 ∨ . . . ∨ p n , p 1 , . . . , p n } False.</formula><p>Resolving two clauses p 1 ∨ . . . ∨ p n and q 1 ∨ . . . ∨ q m , where ¬p i = q j , now starts with two applications of the impI rule to obtain theorems {p 1 ∨ . . . ∨ p n , p 1 , . . . , p i−1 , p i+1 , . . . , p n } ¬p i =⇒ False and {q 1 ∨ . . . ∨ q m , q 1 , . . . , q j−1 , q j+1 , . . . , q m } p i =⇒ False.</p><p>We then instantiate a previously proven theorem (P =⇒ False) =⇒ (¬P =⇒ False) =⇒ False (where P is an arbitrary proposition) with p i for P . Instantiation is another basic operation provided by Isabelle's inference kernel. Finally two applications of impE yield {p 1 ∨. . .∨p n , p 1 , . . . , p i−1 , p i+1 , . . . , p n }∪{q 1 ∨. . .∨q m , q 1 , . . . , q j−1 , q j+1 , . . . , q m } False.</p><p>This approach requires no explicit reordering of literals anymore. Furthermore, duplicate literals do not need to be eliminated after resolution. This is all handled by the inference kernel now; the sequent representation is as close to a SAT solver's view of clauses as sets of literals as possible in Isabelle. With this representation, we do not rely on derived tactics anymore to perform resolution, but we can give a precise description of the implementation in terms of (five, as we see above) applications of core inference rules.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head>CNF Sequent Representation</head><p>The sequent representation has the disadvantage that each clause contains itself as a hypothesis. Since hypotheses are accumulated during resolution, this leads to larger and larger sets of hypotheses, which will eventually contain every clause used in the resolution proof. Forming the union of these sets takes the kernel a significant amount of time. It is therefore faster to use a slightly different clause representation, where each clause contains the whole CNF formula φ * as a hypothesis. Let</p><formula xml:id="formula_15">φ * ≡ k i=1 C i ,</formula><p>where k is the number of clauses. Using the Assume rule, we obtain a theorem</p><formula xml:id="formula_16">{ k i=1 C i } k i=1 C i .</formula><p>Repeated elimination of conjunction (with the help of two theorems, namely P ∧ Q =⇒ P and</p><formula xml:id="formula_17">P ∧ Q =⇒ Q) yields a list of theorems { k i=1 C i } C 1 , . . . , { k i=1 C i } C k .</formula><p>Each of these theorems is then converted into the sequent form described above, with literals as hypotheses and False as the theorem's conclusion. This representation increases preprocessing times slightly, but throughout the entire proof, the set of hypotheses for each clause now consists of k i=1 C i and the clause's literals only. It is therefore much smaller than before, which speeds up resolution. Furthermore, memory requirements do not increase: the term k i=1 C i needs to be kept in memory only once, and can be shared between different clauses. 
This can also be exploited when the union of hypotheses is formed (assuming that the inference kernel and the underlying ML system support it): a simple pointer comparison is sufficient to determine that both theorems contain k i=1 C i as a hypothesis (and hence that the resulting theorem needs to contain it only once); no lengthy term traversal is required.</p><p>We should mention that this representation of clauses, despite its superior practical performance, has a small downside. The resulting theorem always has every given clause as a premise, while the theorem produced by the sequent representation only has those clauses as premises that were actually used in the proof. If the logically stronger theorem is needed, it can be obtained by analyzing the resolution proof to identify the used clauses beforehand, and filtering out the unused ones before proof reconstruction.</p><p>We still need to determine the pivot literal (i.e. p i and ¬p i in the above example) before resolving two clauses. This could be done by directly comparing the hypotheses of the two clauses, and searching for a term that occurs both positively and negatively. It turns out to be slightly faster however (and also more robust, since we make fewer assumptions about the actual implementation of hypotheses in Isabelle) to use our own data structure. With each clause, we associate a table that maps integers -one for each literal in the clause -to the Isabelle term representation of a literal. The table is an inverse of the mapping from literals to integers that was constructed for translation into DIMACS format, but restricted to the literals that actually occur in a clause. Positive integers are mapped to positive literals (atoms), and negative integers are mapped to negative literals (negated atoms). This way term negation simply corresponds to integer negation. 
The table associated with the result of a resolution step is the union of the two tables that were associated with the resolvents, but with the entry for p i (¬p i , respectively) removed from the table associated with the first (second, respectively) clause.</p><p>Another optimization, related not to the representation of individual clauses, but to the overall proof structure, is perhaps more obvious and has been present in our implementations since the beginning. zChaff and MiniSat, during proof search, may generate many clauses that are ultimately not needed to derive the empty clause. Instead of replaying the whole proof trace in chronological order, we perform "backwards" proof reconstruction, starting with the identifier of the empty clause, and recursively proving the required resolvents using depth-first search.</p><p>While some clauses may not be needed at all, others may be used multiple times in the resolution proof. It would be highly inefficient to prove these clauses over and over again. Therefore all clauses proved are stored in an array, which is allocated at the beginning of proof reconstruction (with a size big enough to possibly hold all clauses derived during the proof). Initially, this array only contains clauses present in the original CNF formula, still in their original format as a disjunction of literals. Whenever an original clause is used as a resolvent, it is converted into the sequent format described above. (Note that this avoids converting original clauses that are not used in the proof at all.) The converted clause, along with its literal table, is stored in the array instead of the original (unconverted) clause. Each clause obtained as the result of a resolution chain is stored as well. Reusing a previously proved clause merely causes an array lookup.</p><p>For this reason, it could be beneficial to analyze the resolution chains in more detail: sometimes very similar chains occur in a proof, differing only in a clause or two. 
Common parts of resolution chains could be stored as additional lemmas (which only need to be derived once), thereby reducing the total number of resolution steps. A detailed evaluation of this idea is beyond the scope of this paper.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="2.3">A Simple Example</head><p>In this section we illustrate the proof reconstruction using a small example. Consider the following input formula</p><formula xml:id="formula_18">φ ≡ (¬x 1 ∨ x 2 ) ∧ (¬x 2 ∨ ¬x 3 ) ∧ (x 1 ∨ x 2 ) ∧ (¬x 2 ∨ x 3 ).</formula><p>Since φ is already in conjunctive normal form, preprocessing simply yields the theorem φ = φ. The corresponding DIMACS CNF file, aside from its header, contains one line for each clause in φ:</p><p>-1 2 0 -2 -3 0 1 2 0 -2 3 0 zChaff and MiniSat easily detect that this problem is unsatisfiable. zChaff creates a text file with the following data: CL: 4 &lt;= 2 0 VAR: 2 L: 0 V: 1 A: 4 Lits: 4 VAR: 3 L: 1 V: 0 A: 1 Lits: 5 7 CONF: 3 == 5 6</p><p>We see that first a new clause, with identifier 4, is derived by resolving clause 2, x 1 ∨ x 2 , with clause 0, ¬x 1 ∨ x 2 . The pivot variable which occurs both positively (in clause 2) and negatively (in clause 0) is x 1 ; this variable is eliminated by resolution.</p><formula xml:id="formula_19">¬x 2 ∨ x 3 x 1 ∨ x 2 ¬x 1 ∨ x 2 x 2 x 3 ¬x 2 ∨ ¬x 3 x 1 ∨ x 2 ¬x 1 ∨ x 2 x 2 ¬x 3 ⊥ Figure 2: Resolution Proof found by zChaff</formula><p>Now the value of x 2 (VAR: 2) can be deduced from clause 4 (A: 4). x 2 must be true (V: 1). Clause 4 contains only one literal (Lits: 4), namely x 2 (since 4 ÷ 2 = 2), occurring positively (since 4 mod 2 = 0). This decision is made at level 0 (L: 0), before any decision at higher levels.</p><p>Likewise, the value of x 3 can then be deduced from clause 1, ¬x 2 ∨ ¬x 3 . x 3 must be false (V: 0).</p><p>Finally clause 3 is our conflict clause. It contains two literals, ¬x 2 (since 5 ÷ 2 = 2, 5 mod 2 = 1) and x 3 (since 6 ÷ 2 = 3, 6 mod 2 = 0). But we already know that both literals must be false, so this clause is not satisfiable.</p><p>In Isabelle, the resolution proof corresponding to zChaff's proof trace is constructed backwards from the conflict clause. 
A tree-like representation of the proof is shown in Figure <ref type="figure" target="#fig_5">2</ref>. Note that information concerning the level of decisions, the actual value of variables, or the literals that occur in a clause is redundant in the sense that it is not needed by Isabelle to validate zChaff's proof. The clause x 2 , although used twice in the proof, is derived only once during resolution (and reused the second time), saving one resolution step in this little example.</p><p>The proof trace produced by MiniSat for the same problem happens to encode a different resolution proof:</p><formula xml:id="formula_20">R 0 &lt;= -1 2 R 1 &lt;= -2 -3 R 2 &lt;= 1 2 R 3 &lt;= -2 3 C 4 &lt;= 3 3 1 C 5 &lt;= 0 2 4 C 6 &lt;= 2 2 4 C 7 &lt;= 5 1 6 X 0 7</formula><p>The first four lines introduce clause identifiers for all four clauses in the original problem, in their original order as well (effectively making the renaming R from MiniSat's clause identifiers to internal clause identifiers the identity in this case). The next four lines define four new clauses (one clause per line), derived by resolution. Clause 4 is the result of resolving clause 3 (¬x 2 ∨ x 3 ) with clause 1 (¬x 2 ∨ ¬x 3 ), where x 3 is used as pivot literal. Hence clause 4 is equal to ¬x 2 . Likewise, clause 5 is the result of resolving clauses 0 and 4, and clause 6 is obtained by resolving clauses 2 and 4. Finally resolving clauses 5 and 6 yields the empty clause, which is assigned clause identifier 7. The proof is shown in Figure <ref type="figure" target="#fig_11">3</ref>. Again one resolution step is saved in the Isabelle implementation because clause ¬x 2 is proved only once. </p><formula xml:id="formula_21">¬x 1 ∨ x 2 ¬x 2 ∨ x 3 ¬x 2 ∨ ¬x 3 ¬x 2 ¬x 1 x 1 ∨ x 2 ¬x 2 ∨ x 3 ¬x 2 ∨ ¬x 3 ¬x 2 x 1 ⊥ Figure 3: Resolution Proof</formula></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="3">Evaluation</head><p>In <ref type="bibr" target="#b108">[Web05a]</ref>, we compared the performance of our approach, using the naive HOL problem representation, to that of Isabelle's existing automatic proof procedures on all 42 problems contained in version 2.6.0 of the TPTP library <ref type="bibr" target="#b104">[SS98]</ref> that have a representation in propositional logic. The problems were negated, so that unsatisfiable problems became provable. The benchmarks were run on a machine with a 3 GHz Intel Xeon CPU and 1 GB of main memory. 19 of these 42 problems are rather easy, and were solved in less than a second each by both the existing procedures and the SAT solver approach. On the remaining 23 problems, zChaff proved to be clearly superior to Isabelle's built-in proof procedures. zChaff solved all problems in less than a second, and proof reconstruction in Isabelle/HOL took a few seconds at most for all but one problem: with the naive HOL representation, the proof for problem MSC007-1.008 was reconstructed in just over 12 minutes.</p><p>To give an impression of the effect that the different clause representations discussed in Section 2.2.3 have on performance, Table <ref type="table" target="#tab_4">1</ref> shows the different times required to prove problem MSC007-1.008. The proof found by zChaff for this problem has 8,705 resolution steps. MiniSat finds a proof with 40,790 resolution steps for the same problem, which is reconstructed in about 3.8 seconds total with the sequent representation, and in 1.1 seconds total with the CNF sequent representation. The times to prove the other problems from the TPTP library have decreased in a similar fashion and are well below one second each now.</p><p>This enables us to evaluate the performance on some significantly larger problems, taken from the SATLIB library <ref type="bibr" target="#b85">[HS00]</ref>. 
These problems do not only push Isabelle's inference kernel to its limits, but also other parts of the prover. While the smaller TPTP problems were converted to Isabelle's input syntax by a Perl script, this approach turns out to be infeasible for the larger SATLIB problems. The Perl script still works fine, but Isabelle's parser (which was mainly intended for small, hand-crafted terms) is unable to parse the resulting theory files, which are several megabytes large, in reasonable time. Also, the prover's user interface is unable to display the resulting formulae. We have therefore implemented our own little parser, which builds ML terms directly from the DIMACS files. Statistics for four unsatisfiable SATLIB problems (chosen among those that were used to evaluate zChaff's performance in <ref type="bibr" target="#b110">[ZM03]</ref>) are shown in Tables <ref type="table" target="#tab_24">2 and 3</ref>, for zChaff and MiniSat respectively. The first column shows the time in seconds that it takes the SAT solver to find a proof of unsatisfiability. The second column, "Proof", shows the time in seconds required to replay the proof's resolution steps in Isabelle/HOL, using the CNF sequent representation of clauses. The third column shows the number of resolution steps performed during proof replay. The last column, "Total", finally shows the total time to prove the problem unsatisfiable in Isabelle, including SAT solving time, proof replay, parsing of input and output files, and any other intermediate pre- and postprocessing. These timings were obtained on an AMD Athlon 64 X2 Dual Core Processor 3800+ with 4 GB of main memory. An x indicates that the solver ran out of memory, or that the proof trace file exceeded a size of 2 GB. 
Needless to say that none of these problems can be solved automatically by Isabelle's built-in proof procedures.</p><p>It seems that proof checking in Isabelle/HOL, despite all optimizations that we have implemented, is sometimes about an order of magnitude slower than proof verification with an external checker written in C++ <ref type="bibr" target="#b110">[ZM03]</ref>. From Table <ref type="table" target="#tab_5">2</ref> we conclude that proving unsatisfiability in Isabelle/HOL is by a factor of roughly 2 to 10 slower than using zChaff alone. This additional overhead was to be expected: it is the price that we have to pay for using Isabelle's LCF-style kernel, which is not geared towards propositional logic. However, we also see that proof reconstruction in Isabelle scales quite well with our latest implementation, and that it remains feasible even for large SAT problems.</p><p>Comparing the runtimes for problem c7552mul.miter on the proofs found by zChaff and MiniSat, we see that the time taken to reconstruct a proof does not solely depend on the number of resolution steps. In particular our algorithm for resolving two clauses, as described in Section 2.2.3, is linear in the length (i.e. number of literals) of those clauses. The average length of a clause is about 31.0 for the MiniSat proof, and about 98.6 for the proof found by zChaff. This explains why the zChaff proof, despite its smaller number of resolution steps, takes longer to reconstruct.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="4">Related Work</head><p>Michael Gordon has implemented HolSatLib <ref type="bibr" target="#b84">[Gor01]</ref>, a library which is now part of the HOL 4 theorem prover. This library provides functions to convert HOL 4 terms into CNF, and to analyze them using a SAT solver. In the case of unsatisfiability however, the user only has the option to trust the external solver. No proof reconstruction takes place, "since there is no efficient way to check for unsatisfiability using pure Hol98 theorem proving" <ref type="bibr" target="#b84">[Gor01]</ref>. A bug in the SAT solver could ultimately lead to an inconsistency in HOL 4.</p><p>Perhaps closer related to our work is the integration of automated first-order provers, in the context of Isabelle recently further explored by Joe Hurd <ref type="bibr" target="#b86">[Hur99,</ref><ref type="bibr">Hur02]</ref>, Jia Meng <ref type="bibr" target="#b92">[Men03]</ref>, and Lawrence Paulson <ref type="bibr">[MP04,</ref><ref type="bibr" target="#b95">MP06]</ref>. Proofs found by the automated system are either verified by the interactive prover immediately <ref type="bibr" target="#b86">[Hur99]</ref>, or translated into a proof script that can be executed later <ref type="bibr">[MP04]</ref>. Also Andreas Meier's TRAMP system <ref type="bibr" target="#b91">[Mei00]</ref> transforms the output of various automated first-order provers into natural deduction proofs. The main focus of their work however is on the necessary translation from the interactive prover's specification language to first-order logic. In contrast our approach is so far restricted to instances of propositional tautologies, but we have focused on performance (rather than on difficult translation issues), and we use a SAT solver, rather than a first-order prover. 
Other work on combining proof and model search includes <ref type="bibr" target="#b78">[dNM06]</ref>.</p><p>A custom-built SAT solver has been integrated with the CVC Lite system [BB04] by Clark Barrett et al. <ref type="bibr" target="#b76">[BBD03]</ref>. While this solver produces proofs that can be checked independently, our work shows that it is possible to integrate existing, highly efficient solvers with an LCF-style prover: the information provided by recent versions of zChaff and MiniSat is sufficient to produce a proof object in a theorem prover, no custom-built solver is necessary.</p><p>An earlier version of this work was presented in <ref type="bibr" target="#b108">[Web05a]</ref>, and improved by Alwen Tiu et al. [FMM + 06]. Furthermore Hasan Amjad <ref type="bibr" target="#b74">[Amj06b]</ref> has recently integrated a proof-generating version of the MiniSat solver with HOL 4 in a similar fashion. In this paper we have discussed our most recent implementation, which is based on a novel clause representation and constitutes a significant performance improvement when compared to earlier work.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="5">Conclusions and Future Work</head><p>The SAT solver approach dramatically outperforms the automatic proof procedures that were previously available in Isabelle/HOL. With the help of zChaff or MiniSat, many formulae that were previously out of the scope of Isabelle's built-in tactics can now be proved -or refuted -automatically, often within seconds. Isabelle's applicability as a tool for formal verification, where large propositional problems occur in practice, has thereby improved considerably.</p><p>Furthermore, using the data structures and optimizations described in this paper, proof reconstruction for propositional logic scales quite well even to large SAT problems and proofs with several hundred thousand resolution steps. The additional confidence gained by using an LCF-style prover to check the proof obviously comes at a price (in terms of runtime), but it's not nearly as expensive as one might have expected after earlier implementations.</p><p>While improving the performance of our implementation, we confirmed an almost self-evident truth: use profiling to see which functions take a lot of time, and focus on improving them -this is where the greatest benefits lie. This was an iterative process. A better implementation would allow us to tackle larger SAT problems, which in turn would uncover new performance bottlenecks. More importantly, we discovered some inefficiencies in the implementation of the Isabelle kernel. (Instantiating a theorem with a term, for example, was linear in the size of the term, rather than in constant time.) These inefficiencies played no important role as long as the kernel only had to deal with relatively small terms, but in our context, where formulae sometimes consist of millions of literals, they turned out to have a negative impact on performance. 
Subsequently the kernel implementation was modified, and these inefficiencies were removed.</p><p>Tuning an implementation to the extent presented here requires a great deal of familiarity with the underlying theorem prover. Nevertheless our results are applicable beyond Isabelle/HOL. Other interactive provers for higher-order logic, e.g. HOL 4 and HOL-Light, use very similar data structures to represent their theorems. Hasan Amjad has confirmed that the CNF sequent representation works equally well in these provers <ref type="bibr" target="#b74">[Amj06b]</ref>.</p><p>We have already mentioned some possible directions for future work. There is probably not very much potential left to optimize the implementation of resolution itself at this point. However, to further improve the performance of proof reconstruction, it could be beneficial to analyze the resolution proof found by the SAT solver in more detail. Merging similar resolution chains may reduce the overall number of resolutions required, and re-sorting resolutions may help to derive shorter clauses during the proof, which should improve the performance of individual resolution steps. Some preliminary results along these lines are reported in <ref type="bibr" target="#b73">[Amj06a]</ref>.</p><p>The approach presented in this paper has applications beyond propositional reasoning. The decision problem for richer logics (or fragments thereof) can be reduced to SAT [ABC + 02, Str02, MS05, RH06]. Consequently, proof reconstruction for propositional logic can serve as a foundation for proof reconstruction for other logics. Based on our work, only a proof-generating implementation of the reduction is needed to integrate the more powerful, yet SAT-based decision procedure with an LCF-style theorem prover. This has already been used to integrate haRVey, a Satisfiability Modulo Theories (SMT) prover, with Isabelle <ref type="bibr" target="#b88">[Hur06]</ref>. 
haRVey, like other SMT systems, uses various decision procedures (e.g. congruence closure for uninterpreted functions) on top of a SAT solver.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head>Implementing an Instantiation-based Theorem Prover for First-order Logic</head><p>Konstantin Korovin The University of Manchester, England korovink@cs.man.ac.uk</p><p>The basic idea behind instantiation-based theorem proving is to combine clever generation of instances of clauses with satisfiability checking of ground formulas. There are a number of approaches developed and implemented in recent years: Ordered Semantic Hyper Linking of Plaisted and Zhu, Disconnection Calculus of Letz and Stenz implemented in DCTP, Model Evolution Calculus of Baumgartner and Tinelli implemented in Darwin, and instantiation approach of Claessen implemented in Equinox.</p><p>One of the distinctive features of the approach we have been developing is a modular combination of ground reasoning with instantiation. In particular, this approach allows us to employ any off-the-shelf propositional satisfiability solver in a general context of first-order reasoning. In our previous work (together with Harald Ganzinger) we developed a theoretical background for this instantiation method and have shown completeness results together with general criteria for redundancy elimination. In order to evaluate the practical applicability, we implemented our method in iProver.</p><p>This talk focuses on implementation issues of an instantiation-based theorem prover, based upon our experience with iProver. We show how our abstract framework can be turned into a concrete implementation. We discuss how well-studied technology of implementing resolution theorem provers can be adapted for implementing iProver and issues specific to instantiation. We show some concrete redundancy criteria for instantiation and how they are implemented in iProver. Finally, we discuss some future directions.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="1">Introduction</head><p>The development of effective theorem provers for intuitionistic and constructive logics is of interest both for the investigations and applications of such logics to formal software/hardware verification and program synthesis (see i.e. [ML84, Con86, ES99, Men00, FFO02, BC04, AFF + 06]).</p><p>In this paper we present a strategy and an implementation based on a tableau calculus for propositional intuitionistic logic. Our decision procedure implements the tableau calculus of <ref type="bibr" target="#b111">[AFM04]</ref> (this calculus is an enhancement of the calculus given in <ref type="bibr" target="#b119">[Fit69]</ref> and it is related to the tableau and sequent calculi of <ref type="bibr" target="#b124">[MMO97,</ref><ref type="bibr" target="#b121">Hud93,</ref><ref type="bibr" target="#b116">Dyc92]</ref>).</p><p>We introduce some new techniques utilized by our decision procedure to narrow the search space and the width of the proofs. The PSPACE-completeness of intuitionistic validity ([Sta79]) suggests that backtracking and branching cannot be eliminated. In order to improve the time efficiency of the implementations and make them usable, strategies have to be developed to bound backtracking and branching as much as possible.</p><p>The optimizations we present are explained by the Kripke semantics for intuitionistic logic. Such semantical techniques are related to the fact that tableau calculi are strictly joined to the semantics of the logic at hand. Loosely speaking, a tableau proof for a formula is the attempt to build a model satisfying the formula. The construction of such a model proceeds by increasing, step by step, the information necessary to define such a model (thus, step by step the accuracy of the model increases). If the proof ends in a contradiction, then there is no model for the formula. Otherwise, a model satisfying the formula is immediately derived from the proof. 
With this machinery at hand, first we provide a sufficient condition allowing us to stop a tableau proof without losing the completeness. Then we describe a technique to bound branching on the formulas which only contain conjunctions and disjunctions. Finally we present a technique to deduce the satisfiability of a set of formulas S, when the satisfiability of a set S and a permutation τ such that S = τ (S ) are known. Such a technique allows us to bound backtracking. Our technique to bound backtracking is different from the semantical technique provided in <ref type="bibr">[Wei98]</ref>.</p><p>Besides the strategy and its completeness, in the final part of the paper we present some experimental results on the implementation PITP. PITP is written in C++ and it is tested on the propositional part of ILTP v1.1.1 benchmark library ([ROK06]). Of 274 propositional benchmarks contained in ILTP v1.1.1, PITP decides 215 formulas including 13 previously unsolved problems within the time limit of ten minutes. To give the reader more elements to evaluate PITP strategies, comparisons with different versions of PITP are provided.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="2">Notation and Preliminaries</head><p>We consider the propositional language L based on a denumerable set of propositional variables or atoms PV and the logical connectives ¬, ∧, ∨, →. (Propositional) Kripke models are the main tool to semantically characterize (propositional) intuitionistic logic Int (see <ref type="bibr" target="#b115">[CZ97]</ref> and <ref type="bibr" target="#b119">[Fit69]</ref> for the details). A Kripke model for L is a structure K = P, ≤, ρ, , where P, ≤, ρ is a poset with minimum ρ and is the forcing relation, a binary relation between elements α of P and p of PV such that α p and α ≤ β imply that β p. The forcing relation is extended in a standard way to arbitrary formulas of L as follows:</p><p>1. α A ∧ B iff α A and α B;</p><formula xml:id="formula_22">2. α A ∨ B iff α A or α B; 3. α A → B iff, for every β ∈ P such that α ≤ β, β A implies β B;</formula><p>4. α ¬A iff for every β ∈ P such that α ≤ β, β A does not hold.</p><p>We write α A when α A does not hold.</p><p>It is easy to prove that for every formula A, if α A and α ≤ β, then β A. A formula A is valid in a Kripke model K = P,≤,ρ, if and only if ρ A. It is well known that Int coincides with the set of formulas valid in all Kripke models.</p><p>If we consider Kripke models K = P,≤,ρ, such that |P | = 1 we get classical models for (propositional) classical logic Cl. Classical models are usually seen as functions σ from PV to {true, f alse}. Given a formula A and a model σ, we use σ |= A with the usual meaning of satisfiability. Finally, given a set S, the set PV(S) denotes the elements of PV occurring in S.</p><p>In the following presentation, we give a brief overview of the tableau calculus Tab of <ref type="bibr" target="#b111">[AFM04]</ref> which is implemented by our decision procedure. The rules of the calculus are given in Table <ref type="table" target="#tab_4">1</ref>. 
The calculus works on signed well formed formulas (swff for short), where a swff is a (propositional) formula prefixed with a sign T, F or F c . Given a Kripke model K = P,≤,ρ, , a world α ∈ P , a formula A and a set of swffs S, the meaning of the signs is as follows:</p><formula xml:id="formula_23">• α £ TA (α realizes TA) iff α A; • α £ FA iff α A; • α £ F c A iff α ¬A;</formula><p>• α £ S iff α realizes every swff in S.</p><formula xml:id="formula_24">• K £ S iff ρ £ S.</formula><p>A proof table (or proof tree) for S is a tree, rooted with S and obtained by the subsequent application of the rules of the calculus. As an example, let Γ = {T(A ∧ B), T(B ∧ C), F(A ∨ B)}. With " rule T∧ applies to Γ taking H ≡ T(A ∧ B) as main swff" we mean that T∧ applies to Γ as Γ Γ\{T(A∧B)},TA,TB T∧. If no confusion arises we say that a rule applies to Γ or equivalently a rule applies to H. Finally with Rule(H) we mean the rule related to H (in our example Rule(T(A ∧ B)) is T∧).</p><p>For every proof table for S, the depth and the number of symbols occurring in the nodes is linearly bounded in the number of symbols occurring in S (see <ref type="bibr" target="#b111">[AFM04]</ref> for further details). This is the key feature to implement a depth-first decision procedure whose space complexity is O(n lg n) (as a matter of fact, it is well known that to generate all the proof tables in the search space and to visit them with a depth-first strategy, it is sufficient to have a stack containing, for every node of the visited branch, the index of the main swff and a bit to store if the leftmost branch has been visited <ref type="bibr" target="#b121">[Hud93]</ref>).</p><p>We emphasize that the sign F c is introduced to give a specialized treatment of the negated formulas. 
In this sense the rules for the formulas signed with F c could be rewritten replacing in the F c -rules every occurrence of the sign F c with T¬.</p><p>Given a set of swffs S, the signed atoms of S are the elements of the set δ S = {H|H ∈ S and H is a signed atom}. We say that S contains a complementary pair iff {TA, FA} ⊆ S or {TA, F c A} ⊆ S. Given δ S , we denote with σ δ S the (classical) model defined as follows: if Tp ∈ δ S , then σ δ S (p) = true, σ δ S (p) = f alse otherwise.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head>Given a classical model σ,</head><formula xml:id="formula_25">(i) σ £TH iff σ |= H; (ii) σ £FH and σ £F c H iff σ |= H.</formula><p>Given a set of swffs S, a swff H (respectively a set of swffs S ) and the set of signed atoms δ S defined as above, we say that δ S r ealizes H (respectively δ S realizes S ), and we write δ S £ H (respectively δ S £ S ), iff there exists a classical model σ fulfilling the following conditions:</p><formula xml:id="formula_26">(i) σ £ H (respectively σ £ H for every H ∈ S ). (ii) if Tp ∈ δ S , then σ(p) = true; (iii) if Fp ∈ δ S or F c p ∈ δ S ,</formula></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head>then σ(p) = f alse</head><p>In other words, the relation £ between δ S and a signed formula H holds if there exists a classical model σ both realizing δ S and H. If {Tp, F c p, Fp} ∩ S = ∅ then there is no condition on the truth value that σ gives to p. We say that a wff or a swff is classically evaluable (cle-wff and cle-swff for short) iff conjunctions and disjunctions are the only connectives occurring in it. Finally, a set S of swffs is contradictory if at least one of the following conditions holds:</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head>S, T(A</head><formula xml:id="formula_27">∧ B) S, TA, TB T∧ S, F(A ∧ B) S, FA|S, FB F∧ S, F c (A ∧ B) S c , F c A|S c , F c B F c ∧ S, T(A ∨ B) S, TA|S, TB T∨ S, F(A ∨ B) S, FA, FB F∨ S, F c (A ∨ B) S, F c A, F c B F c ∨ S, TA, T(A → B) S, TA, TB T → Atom, with A an atom S, F(A → B) S c , TA, FB F → S, F c (A → B) S c , TA, F c B F c → S, T(¬A) S, F c A T¬ S, F(¬A) S c , TA F¬ S, F c (¬A) S c , TA F c ¬ S, T((A ∧ B) → C) S, T(A → (B → C)) T → ∧ S, T(¬A → B) S c , TA|S, TB T → ¬ S, T((A ∨ B) → C) S, T(A → p), T(B → p), T(p → C) T → ∨ S, T((A → B) → C) S c , TA, Fp, T(p → C), T(B → p)|S, TC T →→ where S c = {TA|TA ∈ S} ∪ {F c A|F c A ∈ S}</formula><p>1. S contains a complementary pair; 2. S contains a cle-swff H such that δ S H; 3. δ S S and for every propositional variable p occurring in S, Tp ∈ S or F c p ∈ S. Proposition 2.1 If a set of swffs S is contradictory, then for every Kripke model K = P,≤,ρ, , ρ S.</p><p>Proof: If S is contradictory because the first condition holds, then for some formula A, {TA, FA} ⊆ S or {TA, F c A} ⊆ S holds. By the meaning of the signs and the definition of the forcing relation in Kripke models, the claim immediately follows. If S is contradictory because the second condition holds, then δ S H. Thus, there is no classical model realizing both the signed atoms of S (that is δ S ) and H. Since H is a cle-swff, its classical and intuitionistic realizability coincide, thus no Kripke model realizes S. If S is contradictory because the third condition holds, then let us suppose there exists a Kripke model K = P,≤,ρ, such that ρ £ S. Then for every p ∈ PV(S), ρ p or ρ ¬p and this means that every world in K forces the same propositional variables occurring in S, that is for every α, β ∈ P and for every p ∈ PV(S), α p iff β p. Let φ ∈ P be a maximal state in the poset P, ≤, ρ . Since φ behaves as a classical model, by hypothesis, φ S. 
Then, since every world of K forces the same propositional variables of S we deduce that ρ S.</p><p>A closed proof table is a proof table whose leaves are all contradictory sets. A closed proof table is a proof of the calculus: a formula A is provable if there exists a closed proof table for {FA}. For every rule of the calculus it is easy to prove that if there exists a Kripke model K = P,≤,ρ, and α ∈ P such that α realizes the premise of the rule, then there exists (a possibly different) Kripke model K = P , ≤ , ρ , and β ∈ P such that β realizes the conclusion. This is the main step to prove the soundness of the calculus: Theorem 2.2 (Soundness) Let A be a wff. If there exists a closed proof table starting from {FA}, then A is valid.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="3">The optimizations and the proof search algorithm</head><p>Below we describe a recursive procedure Tab(S) which given a set S of swffs, returns either a closed proof table for S or NULL (if there exists a Kripke model realizing S).</p><p>To describe Tab we use the following notation. Let S be a set of swffs, let H ∈ S and let S 1 or S 1 | S 2 be the nodes of the proof tree obtained by applying to S the rule Rule(H) corresponding to H. <ref type="bibr">If</ref>  </p><formula xml:id="formula_28">(T(A ∧ B)) = { TA, TB }, R 1 (T(A ∨ B)) = {TA}, R 2 (T(A ∨ B)) = {TB}, R 1 (T((A → B) → C)) = { TA, Fp, T(B → p), T(p → C) }, R 2 (T((A → B) → C)) = {TC}.</formula><p>As stated in the introduction, Tab uses substitutions. For our purposes, the only substitutions we are interested in are permutations from PV to PV. Given a substitution τ and a swff H (respectively, a set of swffs S and tableau T ) τ (H) (respectively, τ (S) and τ (T )) means the swff (respectively, the set of swffs and the tableau) obtained by applying the substitution in the obvious way.</p><p>The procedure Tab divides the formulas in six groups according to their behavior with respect to branching and backtracking:</p><formula xml:id="formula_29">C 1 = {T(A ∧ B), F(A ∨ B), F c (A ∨ B), T(¬A), T(p → A) with p an atom, T((A ∧ B) → C), T((A ∨ B) → C)}; C 2 = {T(A ∨ B), F(A ∧ B); C 3 = {F(¬A), F(A → B)}; C 4 = {T((A → B) → C), T(¬A → B)}; C 5 = {F c (A → B), F c (¬A)}; C 6 = {F c (A ∧ B)}.</formula><p>We call C i -swffs (i = 1, . . . , 6), respectively C i -rules, the swffs of the group C i , respectively the rules related to C i -swffs.</p><p>The intuition behind these groups can be explained as follows. It is well known that in classical logic the order in which the rules are applied does not affect the completeness of the decision procedure: if a proof exists it is found independently of the order in which the rules are applied. 
Thus the search space consists of a single proof tree whose branches have to be visited by the decision procedure. The rules having two conclusions give rise to branches. Rules of this kind are T∨ and F∧. In intuitionistic logic the order in which the rules are applied is relevant and affects the completeness. Given a set Γ, there are many ways to go on with the proof (that is many swffs can be chosen as main swff). Since the order is relevant, if the choice of a swff as main swff does not give a closed proof table, we have to backtrack and try with another swff as main swff. This means that in intuitionistic logic there is a space of proof tables to be visited by backtracking. Rules requiring backtracking are, e.g., F →, T →→. In order to bound time consumption, Tab applies the rules not requiring branching and backtracking first, then the rules not requiring backtracking, finally the rules requiring backtracking. In the Completeness Lemma (Lemma 4.1, page 11) we prove that we do not lose completeness if C 5 and C 6 -rules are applied only when no other rule applies. Thus the application of C 5 and C 6 -rules is invertible and no backtracking is required. On the other hand, to get completeness, backtracking is unavoidable when C 3 and C 4 -rules are applied. Now we come to the optimizations. First we discuss two checks that allow us to bound the depth of the proofs. Let S be a set such that σ δ S £ S. Thus the Kripke model coinciding with the classical model σ δ S realizes S and we do not need to go on with the proof. The second check is related to Point 3 in the definition of contradictory set. If δ S S and every propositional variable occurring in S occurs in S as swff signed T or F c , then there is no Kripke model realizing S and we do not need to proceed with the proof. 
Although these checks could be performed after every rule application, our strategy performs it when neither a C 1 nor a C 2 -rule applies to S (in Section 5 this optimization is referred as opt1).</p><p>In order to bound branching, Tab treats in a particular way the cle-swffs in S, that is the swffs in which only ∧ and ∨ occur (in other words swffs whose intuitionistic truth coincides with classical truth). When Tab treats C 2 -swffs (Point 3 of the algorithm given below), first of all Tab checks if in S there exists a cle-swff H such that δ S H. If S fulfills this condition, then S is not realizable, as we pointed out in Proposition 2.1. Otherwise, if in S a cle-swff H occurs, such that σ δ S does not satisfy H, the splitting rule Rule(H) is applied (we recall that σ δ S is the classical model defined from S by taking as true the atoms p such that Tp ∈ S). Thus the cle-swffs of S, satisfying the underlying model, are never applied. Despite this, from the realizability of one among (S \ {H}) ∪ R 1 (H) and (S \ {H}) ∪ R 2 (H) we have enough information to prove the realizability of S. In other words, we consider only the cle-swffs of C 2 that are not realized by the model underlying S (see Point 3 and Completeness Lemma for the details). As an example consider S = { F(P 0 ∧ P 2), F(P 0 ∧ P 4), F(P 2 ∧ P 4), F(P 1 ∧ P 3), F(P 1 ∧ P 5), F(P 3 ∧ P 5), T(P 0 ∨ P 1), T(P 2 ∨ P 3), T(P 4 ∨ P 5) }.</p><p>Since σ δ S realizes the F-swffs in S but σ δ S does not realize any of the T-swffs of S, then Tab chooses one of them, let us suppose H = T(P 0 ∨ P 1). The rule T∨ is applied to S and S 1 = (S \ {H}) ∪ {TP 0} and S 2 = (S \ {H}) ∪ {TP 1} are the subsequent sets of S. Now consider S 1 . Since σ δ S 1 realizes TP 0 and all the F-swffs in S 1 , but σ δ S 1 realizes neither T(P 2 ∨ P 3) nor T(P 4 ∨ P 5), Tab chooses one of them, let us suppose H = T(P 2 ∨ P 3). 
The rule T∨ is applied to S 1 and S 3 = (S 1 \ {H}) ∪ {TP 2} and S 4 = (S 1 \ {H}) ∪ {TP 3} are the subsequent sets of S 1 . Since δ S 3 does not realize F(P 0 ∧ P 2) we deduce that S 3 is contradictory. Similarly for the other sets. We emphasize that at every step, the C 2 -rules applicable to S i are only the ones where the related swffs are not realized by σ δ S i . Without this strategy a huge closed proof table could arise (in Section 5 this optimization is referred as opt2).</p><p>To bound the search space, [Wei98] describes a decision procedure in which backtracking is bounded by a semantical technique inspired by the completeness theorem. The completeness theorem proves the satisfiability (realizability in our context) of a set S under the hypothesis that S does not have any closed proof table. As an example, let S = {F(A → B), T((A → B) → C), FC, TD}. From S we can define the Kripke model K(S) = P,≤,ρ, such that P = {ρ} and ρ D. Note that K(S) realizes TD but K(S) does not realize S. To prove the realizability of S, the realizability of S 1 = {TA, FB, T((A → B) → C), TD} and one between S 2 = {TA, Fp, T(B → p), T(p → C), TD} and S 3 = {F(A → B), TC, FC, TD} have to be proved. Since S 3 is not realizable, the realizability of S 1 and S 2 must be proved. From S 1 we define the Kripke model K(S 1 ) = {α},≤,α, , where α ≤ α, α D and α A such that K(S 1 ) £ S 1 . If we glue K(S1) above K(S) we get a new Kripke model K = {ρ, α},≤,ρ, where ρ ≤ ρ, ρ ≤ α, α ≤ α, ρ D,α D and α A. Since K £ S, we do not need to apply T →→ to S in order to obtain S 2 = {TA, Fp, T(B → p), T(B → C), TD} (from S 2 a Kripke model K(S 2 ) is definable; K(S 2 ) glued above K(S) gives rise to a Kripke model realizing S). In this case the work on S 2 is spared. In the general case, see [Wei98], the information collected from non closed proof tables built from a set S is used to build a Kripke model K. As a matter of fact, let S be a set such that no C 1 or C 2 -rule is applicable. 
Let {H 1 , . . . , H u } ⊆ S be the C 3 and C 4 -swffs of S. If there exists a H j such that K H j , then Rule(H j ) has to be applied. If a closed proof table is found, then S is not realizable, otherwise the Kripke model K can be extended into a new one, K j satisfying H j . The procedure of [Wei98] continues until a closed proof table or a Kripke model K i , 1 ≤ i ≤ u, such that K i £ {H 1 , . . . , H u } is found. The procedure prunes the search space, since in S not all the swffs requiring backtracking are considered, but only the swffs which, when checked, are not realized by the Kripke model at hand. Now, consider S = {F(A → B), F(C → D)}. From S we can define the Kripke model K(S) = P,≤,ρ, such that P = {ρ} and is the empty set. K(S) does not realize S. By applying F → to S with F(A → B) as the main formula we get S 1 = {TA, FB}. The underlying model is K(S 1 ) = {α},≤,α, with α A. K(S 1 ) glued above K(S) gives rise to a model that does not realize F(C → D). Thus we must backtrack. We apply F → to S with F(C → D) as the main formula. We get S 2 = {TC, FD}. The underlying model is K(S 2 ) = {β},≤,β, such that β ≤ β and β C realizes S 2 . By gluing K(S 1 ) and K(S 2 ) above K(S) the resulting model</p><formula xml:id="formula_30">K = {ρ, α, β},≤,ρ, such that ρ ≤ α, ρ ≤ β, ρ ≤ ρ, α ≤ α, β ≤ β, α A and β C realizes S.</formula><p>But by a permutation τ : PV → PV such that τ (C) = A and τ (D) = B, τ (S 2 ) = S 1 and we can build K(S 2 ) = P, ≤, , ρ, from K(S 1 ) as follows: K(S 2 ) has the same poset as K(S 1 ) and is: for every α ∈ P and for every p ∈ PV, α p iff α τ (p). In other words, K(S 1 ) can be translated into K(S 2 ) via τ and we can avoid backtracking on S. 
As another example consider</p><formula xml:id="formula_31">S = { T(((P 0 → (P 1 ∨ P 2)) → (P 1 ∨ P 2))),</formula><p>T(((P 2 → (P 1 ∨ P 0)) → (P 1 ∨ P 0))), T(((P 1 → (P 2 ∨ P 0)) → (P 2 ∨ P 0))), F((P 1 ∨ (P 2</p><formula xml:id="formula_32">∨ P 0))) },</formula><p>where only a few steps are needed to obtain S starting from {FH}, where H is the axiom schema 2 i=0 (P i → j =i P j ) → j =i P j → 2 i=0 P i characterizing the logic of binary trees (a logic in the family of k-ary trees logics, <ref type="bibr" target="#b115">[CZ97]</ref>, also known as Gabbay-de Jongh logics). From S we can define the model K(S) = P,≤,ρ, such that P = {ρ} and is the empty set. K(S) does not realize S. By applying T →→ to S with H = T(((P 0 → (P 1 ∨ P 2)) → (P 1 ∨ P 2))) we get S 1 = (S \ {H}) ∪ R 2 (H) and S 2 = (S \ {H}) c ∪ R 1 (H). Since S 1 is not realizable, to prove the realizability of S we have to prove the realizability of S 2 . S 2 defines the Kripke model K(S 2 ) = {α},≤,α, , where α ≤ α and α P 0. Thus K(S 2 ) £ S 2 holds. Now, if we glue K(S 2 ) above K(S) we get a new model K (S) = {ρ, α},≤,ρ, , where ρ ≤ ρ, ρ ≤ α, α ≤ α and α P 0. K (S) does not realize S. Thus we must backtrack twice:</p><p>(i) by applying T →→ to S with H = T(((P 2 → (P 1 ∨ P 0)) → (P 1 ∨ P 0))) we get,</p><formula xml:id="formula_33">S 3 = (S \ {H}) c ∪ R 1 and S 4 = (S \ {H}) ∪ R 2 (H);</formula><p>(ii) by applying T →→ to S with H = T(((P 1 → (P 2 ∨ P 0)) → (P 2 ∨ P 0))) we get</p><formula xml:id="formula_34">S 5 = (S \ {H}) ∪ R 2 (H) and S 6 = (S \ {H}) c ∪ R 1 (H).</formula><p>In a few steps we find that S 4 and S 6 are not realizable. From S 3 we define the Kripke model K(S 3 ) = {β},≤,β, where β ≤ β and β P 2. K(S 3 ) £ S 3 . From S 5 we define the Kripke model K(S 5 ) = {γ},≤,γ, where γ ≤ γ and γ P 1. K(S 5 ) £ S 5 . Thus by gluing K(S 2 ), K(S 3 ) and K(S 5 ) above K(S) we get a model K realizing S. 
Since we can define the permutations τ 1 and τ 2 such that τ 1 (S 3 ) = S 2 and τ 2 (S 5 ) = S 2 we can avoid backtracking. Thus no proof of realizability of S 3 or S 5 is needed and the Kripke models realizing S 3 and S 5 can be obtained by applying the permutations on the forcing relation of the Kripke model for S 2 . Thus to avoid backtracking Tab builds a permutation τ between sets of swffs. Let H be C 3 -swff. Before applying Rule(H) we check if there exists a permutation τ from PV(S) to PV(S) such that τ ((S \ {H}) c ∪ R 1 (H)) = (S \ {H }) c ∪ R 1 (H ). We already know that the set (S \ {H }) c ∪ R 1 (H ) obtained treating H is realizable by a Kripke model K . Since we have a permutation τ , then the set (S \{H}) c ∪R 1 (H) is realized by the Kripke model K having the same poset as K and such that for every world α and every propositional variable p, α K p iff α K τ (p). This means that the permutation τ allows us to go from K to K (and the permutation τ −1 allows us to go from K to K ). In particular, τ and τ −1 translate the forcing relation between the models. Analogously if H is a C 4 -swff. We emphasize that given a Kripke model K, a permutation τ and a swff H, K £ H does not imply K £ τ (H). Thus we have taken a different route with respect to [Wei98], where the realizability of τ (H) is checked on K that realizes H and the two methods work in different situations. We emphasize that both methods imply a certain computational cost. The method of [Wei98] implies checking the realizability on a Kripke model, which is time consuming for swffs of the kind T(A → B). Our method can be time consuming if we perform a search of a permutation among the P v(S)! possible permutations. However, as we describe in Section 5, the procedure searches in a small subset of all possible permutations (in Section 5, this optimization is referred as opt3).</p><p>Finally, we could also define a permutation to prove that a set is not realizable. 
As a matter of fact, if S is not realizable and there exists a permutation τ such that τ (S) = S , then S is not realizable. Thus, given a set S and a C 2 or C 6 -swff H ∈ S, if (S \ {H}) ∪ R 1 (H) is closed and there exists a permutation τ such that τ ((S \ {H}) ∪ R 1 (H)) = (S \ {H}) ∪ R 2 (H) then (S \ {H}) ∪ R 2 (H) is not realizable and the tableau proof for (S \ {H}) ∪ R 1 (H) can be translated via τ in a tableau proof for (S \ {H}) ∪ R 2 (H) (see Points 3 and 6 of Tab). As a trivial application, consider a valid wff H(p), where p = {p 1 , . . . , p n } are all the propositional variables occurring in H. To prove that {F(H(p) ∧ H(q))} is closed, it is sufficient to prove, by an application of F∧, that {FH(p)} is closed and there exists a permutation such that {FH(p)} = τ ({FH(q)}).</p><p>To save work space, we describe Tab in natural language. The algorithm is divided in seven main points. We recall that the input of Tab is a set S of swffs. If S is realizable, then Tab returns NULL, otherwise Tab returns a closed proof table for S. In the following description, given a set V of swffs, Tab (V) is the recursive call to Tab with actual parameter V . Some instructions 'return NULL' are labeled with r1,. . . , r6.</p><p>In the Completeness Lemma we refer to such instructions by means of these labels. Function Tab (S) 1. If S contains a complementary pair, then Tab returns the proof S; </p><formula xml:id="formula_35">2. If a C 1 -rule applies to S, then let H be a C 1 -swff. If Tab((S \ {H}) ∪ R 1 (H)) returns a proof π,</formula><formula xml:id="formula_36">H be a C 2 -swff. Let π 1 = Tab(S \ {H} ∪ R 1 (H)). If π 1 is NULL, then Tab returns NULL. If there exists a permutation τ such that (S \ {H}) ∪ R 1 (H) = τ (S \ {H}) ∪ R 2 (H)), then Tab returns the proof S π 1 | τ −1 (π 1 ) Rule(H).</formula><p>If such a permutation does not exist, then let </p><formula xml:id="formula_37">π 2 = Tab(S \ {H} ∪ R 2 (H)). 
If π 2 is a proof, then</formula><formula xml:id="formula_38">}) c ∪R 1 (H j ) = τ ((S \{H i }) c ∪R 1 (H i )), then let π = Tab((S \{H i }) c ∪R 1 (H i )).</formula><p>If π is a proof, then </p><formula xml:id="formula_39">(S\{H j })∪R 2 (H j ) = τ ((S\{H i })∪R 2 (H i )), then let π 2,i = Tab((S\{H i })∪R 2 (H i )). If π 2,i is NULL, then Tab returns NULL (r3). If there is neither swff H j , with j ∈ {1, . . . , i − 1}, nor a permutation τ such that (S \ {H j }) c ∪ R 1 (H j ) = τ ((S \ {H i }) c ∪ R 1 (H i )), then if Tab((S \ {H i }) c ∪ R 1 (H i )) returns a proof π 1 , Tab returns the proof S π 1 | π 2,i Rule(H i ).</formula><p>(4.2.2) If Point (4.2.1) does not hold, then there exists a permutation τ and a swff</p><formula xml:id="formula_40">H j (j ∈ {1, . . . , i−1}) such that (S \{H j })∪R 2 (H j ) = τ ((S \{H i })∪R 2 (H i )). If there is no swff H u , with u ∈ {1, . . . , i − 1}, and a permutation τ such that (S \ {H u }) c ∪ R 1 (H u ) = τ ((S \ {H i }) c ∪ R 1 (H i )), then if Tab((S \ {H i }) c ∪ R 1 (H i )) returns a proof π 1 , Tab returns the proof S π 1 | τ −1 (π 2,j ) Rule(H i ).</formula><p>If in Points (4.1) and (4.2) Tab does not find any closed proof table, then Tab returns NULL (r4). If such a permutation does not exist, then let</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="5.">If a C</head><formula xml:id="formula_41">π 2 = Tab((S \ {H}) c ∪ R 2 (H)). If π 2 is a proof, then Tab returns S π 1 | π 2</formula><p>Rule(H), otherwise (π 2 is NULL) Tab returns NULL (r5);</p><p>7. If none of the previous points apply, then Tab returns NULL (r6).</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head>end function Tab.</head><p>Point 4 deserves some comments. If the classical model σ δ S realizes S (condition "σ δ S £ S") then such a model is a Kripke model realizing S. If for every propositional variable p ∈ P v(S), Tp ∈ S or F c p ∈ S holds, then the subsequent sets of S do not contain more information than S and from σ δ S S we deduce δ S S. Since δ S S, then S is not realizable (see Proposition 2.1). The iteration in Points 4.1 and 4.2 can be summarized as follows: a proof for S is searched: for every C 3 or C 4 -swff H in S, Rule(H) is applied to S. If no proof is found, then S is realizable. Now consider C 3 -swffs. If for a previous iteration j the set obtained by applying Rule(H j ) to S is realizable and can be translated via a permutation τ in (S \ {H i }) c ∪ R(H i ), then Tab does not apply Rule(H i ) to S. The permutation τ and the realizability of (S \ {H j }) c ∪ R(H j ) imply the realizability of (S \ {H i }) c ∪ R(H i ) (see Case 4 in Completeness Lemma). Tab applies the same idea in Point 4.2 to C 4 -swffs of S. This point is more complex because C 4 -rules have two conclusions.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="4">Completeness</head><p>In order to prove the completeness of Tab, we prove that given a set of swffs S, if the call Tab(S) returns NULL, then we have enough information to build a countermodel K = P,≤,ρ, such that ρ £ S. To prove the proposition we need to introduce the function deg defined as follows:</p><formula xml:id="formula_42">• if p is an atom, then deg(p) = 0; • deg(A ∧ B) = deg(A) + deg(B) + 2; • deg(A ∨ B) = deg(A) + deg(B) + 3; • deg(A → B) = deg(A) + deg(B)+ (number of implications occurring in A) + 1; • deg(¬A) = deg(A) + 1; • deg(S) = H∈S deg(H).</formula><p>It is easy to show that if S is obtained from a set of swffs S by an application of a rule of Tab, then deg(S ) &lt; deg(S).</p><p>Lemma 4.1 (Completeness) Let S be a set of swffs and suppose that Tab(S) returns the NULL value. Then, there is a Kripke model K = P,≤,ρ, such that ρ £ S.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head>Proof: The proof goes by induction on the complexity of S, measured with respect to the function deg(S).</head><p>Basis: if deg(S) = 0, then S contains atomic swffs only. Tab(S) carries out the instruction labeled (r6). Moreover, S does not contain sets of the kind {Tp, Fp} and {Tp, F c p}. Let K = P,≤,ρ, be the Kripke model such that P = {ρ} and ρ p iff Tp ∈ S. It is easy to show that ρ £ S.</p><p>Step: Let us assume by induction hypothesis that the proposition holds for all sets S such that deg(S ) &lt; deg(S). We prove that the proposition holds for S by inspecting all the possible cases where the procedure returns the NULL value. Case 1: the instruction labeled r1 has been performed. By induction hypothesis there exists a Kripke model K = P,≤,ρ, such that ρ£(S \{H})∪R 1 (H), with H ∈ C 1 . We prove ρ £ H by proceeding according to the cases of H. If H is of the kind T(A ∧ B), then by induction hypothesis ρ £ {TA, TB}, thus ρ A and ρ B, and therefore ρ A ∧ B. This implies ρ £ T(A ∧ B). The other cases for H ∈ C 1 are similar. Case 2: the instruction labeled r2 has been performed. Thus σ δ S £ S holds. We use σ δ S to define a Kripke model K with a single world ρ such that ρ p iff σ(p) = true. Since ρ behaves as a classical model, ρ £ S holds. Case 3: the instruction labeled r3 has been performed. By induction hypothesis there exists a model K such that ρ £ (S \ {H i }) ∪ R 2 (H i ), where H i ∈ C 4 . Let us suppose that H is of the kind T((A → B) → C), thus ρ £ TC and this implies ρ £ H i . The proof goes similarly if H i is of the kind T(¬A → B). Case 4: the instruction labeled r4 has been performed. 
This implies that: (i) for every H ∈ S ∩ C 3 , we have two cases: (ia) Tab((S \ {H}) c ∪ R 1 (H)) = NULL, thus by induction hypothesis there exists a Kripke model </p><formula xml:id="formula_43">K H = P H , ≤ H , ρ H , H such that ρ H £ (S \ {H}) c ∪ R 1 (H); (ib) there exists a permutation τ from PV(S) to PV(S) and a swff H ∈ S ∩ C 3 such that Tab((S \ {H }) c ∪ R 1 (H )) = NULL and (S \{H }) c ∪R 1 (H ) = τ ((S \{H}) c ∪R 1 (H)).</formula><formula xml:id="formula_44">= P H , ≤ H , ρ H , H such that ρ H £ (S \ {H}) c ∪ R 1 (H). (iib) there exist a permutation τ from PV(S) to PV(S) and a swff H ∈ S ∩ C 4 such that Tab((S \ {H }) c ∪ R 1 (H )) = NULL and (S \{H }) c ∪R 1 (H ) = τ ((S \{H}) c ∪R 1 (H)).</formula><p>Thus by Point (a) applied to H , there exists a Kripke model</p><formula xml:id="formula_45">K H = P H , ≤ H , ρ H , H such that ρ H £(S \{H }) c ∪R 1 (H ). By using τ we can translate K H into a model K H = P H , ≤ H , ρ H , H , where P H = P H , ≤ H =≤ H , ρ H = ρ H and for every world α ∈ P H , if p ∈ PV(S), then α H τ (p) iff α H p. By definition of K H , it follows K H £(S \{H}) c ∪R 1 (H)). Let K = P, ≤, ρ,</formula><p>be a Kripke model defined as follows:</p><formula xml:id="formula_46">P = H∈S∩(C 3 S C 4 ) P H ∪ {ρ}; ≤= H∈S∩(C 3 S C 4 ) ≤ H ∪{(ρ, α)|α ∈ P }; = H∈S∩(C 3 S C 4 ) H ∪{(ρ, p)|Tp ∈ S}.</formula></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head>By construction of K, ρ £ S</head><p>Case 5: the instruction labeled r5 has been performed. We point out that</p><formula xml:id="formula_47">S ∩ C 1 = S ∩ C 2 = S ∩ C 3 = S ∩ C 4 = S ∩ C 5 = ∅. By induction hypothesis there exists a model K H = P H , ≤ H , ρ H , H such that ρ H £ (S \ {H}) c ∪ R 2 (H), where H ∈ C 6 . Let K = P, ≤, ρ,</formula><p>be a Kripke defined as follows:</p><formula xml:id="formula_48">P = P H ∪ {ρ}, ≤ = ≤ H ∪{(ρ, α)|α ∈ P }, = H ∪{(ρ, p)|Tp ∈ S}. By the construction of K, ρ £ S,</formula></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head>in particular, by induction hypothesis ρ H ¬B and therefore ρ H ¬(A ∧ B). This implies ρ £ F c (A ∧ B).</head><p>Case 6: the instruction r6 has been carried out. In this case S contains atomic swffs and swffs of the kind T(p → A) and with Tp / ∈ S. Let K = P,≤,ρ, be the Kripke model such that P = {ρ} and ρ p iff Tp ∈ S. It is easy to show that ρ £ S. As a matter of fact, if T(p → A) ∈ S, since Tp / ∈ S, ρ p therefore ρ p → A. By Lemma 4.1 we immediately get the completeness of Tab.</p><p>Theorem 4.2 (Completeness) If A ∈ Int, then Tab({FA}) returns a closed proof table starting from FA.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="5">Implementation and Results</head><p>We have implemented Tab as an iterative procedure in the C++ language. At present there are some features that are missing. First, there is not any kind of lexical normalization. This feature, together with backjumping ( <ref type="bibr" target="#b112">[Bak95]</ref>) and BCP ( <ref type="bibr" target="#b120">[Fre95]</ref>), only to give a partial list of the possible optimization techniques, is typical in theorem provers and will be one of the changes in the new version of the implementation. Moreover, when PITP applies C 3 and C 4 -rules, the search for a permutation proceeds as follows: let S be a set of swffs and let H and H be C 3 -swffs in S. PITP does not perform a full search among the P v(S)! possible permutations. PITP tries to build a permutation τ such that H = τ (H ) and   PITP solves all the formulas in three families and it is the best prover in three families (SYJ202+1, SYJ206+1, SYJ211+1), ft-C solves all the formulas in SYJ212+1 and it is the best prover in four families (SYJ207+1, SYJ208+1, SYJ210+1, SYJ212+1), finally STRIP solves all the formulas in two families but in no class is it the best prover. Finally we run PITP on our Xeon 3.2GHz machine to evaluate the effect of using the optimizations described above.</p><formula xml:id="formula_49">τ = τ −1 . If such a τ fulfills (S \ {H}) c ∪ R 1 (H) = τ ((S \ {H }) c ∪ R 1 (H )),</formula><p>It is well known to people working in ATP that an optimization can be effective for one class of formulas and be negative for other classes. In Table <ref type="table" target="#tab_15">4</ref> we compare different optimizations and give the results of their use on some classes of ILTP v1.1.1 formulas. First, PITP without optimizations outperforms the other versions of PITP on the families SYJ211+1 and SYJ212+1. 
To give an idea of the overhead of the optimizations on formulas where such optimizations do not apply, PITP without optimizations solves the 19th formula of SYJ211+1 in 247.19 seconds and the 10th formula of SYJ212+1 in 76.55 seconds. Among the optimizations the most important seems to be opt2. When opt2 is not active, performance decreases. Thus even if this optimization can be used only on particular classes of formulas, it dramatically influences the performances (in our opinion this gives an idea of the great importance of bounding branching in propositional intuitionistic logic). With regard to the other optimizations, there are some advantages in some classes and disadvantages in others. In Table 5 we provide the results of the comparison between PITP and STRIP on twelve thousand random formulas with three hundred connectives (since the performance of ft-C was worse than STRIP and PITP, Table 5 lacks ft-C). Given the time limit of five minutes, STRIP does not decide 780 formulas, PITP does not decide 16 formulas. </p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="1">Introduction</head><p>We are interested in solving probabilistic planning problems, i. e. planning problems, where the execution of an action leads to the desired effects only with a certain probability. For such problems, Markov decision processes have been adopted as a representational and computational model in much recent work, e.g., by <ref type="bibr" target="#b126">[BBS95]</ref>. They are usually solved using the so-called dynamic programming principle <ref type="bibr" target="#b127">[BDH99]</ref> employing a value iteration algorithm. Classical dynamic programming algorithms explicitly enumerate the state space and are thus exponential. In recent years several methods have been developed which avoid an explicit enumeration of the state space. The most prominent are state abstraction <ref type="bibr" target="#b127">[BDH99]</ref>, heuristic search (e. g. <ref type="bibr" target="#b126">[BBS95,</ref><ref type="bibr" target="#b130">DKKN95]</ref>) and a combination of both as used, for example, in symbolic LAO * <ref type="bibr" target="#b131">[FH02]</ref>.</p><p>A common feature of these approaches is that a Markov decision process is propositionalized before state abstraction techniques and heuristic search algorithms are applied within a value iteration algorithm. Unfortunately, the propositionalization step itself may increase the problem size significantly. To overcome this problem, it was first proposed in <ref type="bibr" target="#b129">[BRP01]</ref> to solve first-order Markov decision processes by applying a first-order value iteration algorithm and first-order state abstraction techniques. Whereas this symbolic dynamic programming approach was rooted in a version of the Situation Calculus <ref type="bibr" target="#b142">[Rei91]</ref>, we have reformulated and extended these ideas in a variant of the fluent calculus <ref type="bibr" target="#b135">[HS04]</ref>. 
In this system, which is now called LIFT-UP, lifted first-order planning under uncertainty can be performed.</p><p>In the LIFT-UP system, states and actions are expressed in the language of the fluent calculus <ref type="bibr" target="#b134">[HS90]</ref>, which is slightly extended to handle probabilities. In addition, value functions and policies are represented by constructing first-order formulas which partition the state space into clusters, referred to as abstract states. Then, value iteration can be performed on top of these clusters, obviating the need for explicit state enumeration. This allows the solution of first-order Markov decision processes without requiring explicit state enumeration or propositionalization. In addition, heuristics are used to guide the search and normalization techniques are applied to eliminate redundant states. The LIFT-UP approach can thus be viewed as a first-order generalization of symbolic LAO * or, alternatively, as symbolic dynamic programming enhanced by heuristic search and state space normalization.</p><p>To evaluate the LIFT-UP system we have developed a domain-dependent implementation called FluCaP. It can solve probabilistic blocksworld problems as they appeared, for example, in the colored blocksworld domain of the 2004 International Planning Competition. FluCaP is quite successful and outperforms other systems on truly first-order problems. On the other hand, working towards a domain-independent implementation we have studied θ-subsumption algorithms. θ-subsumption problems arise at various places in the LIFT-UP system: The normalization process requires checking whether one abstract state subsumes another one; the check whether an action is applicable to some abstract state and the computation of the set of successor or predecessor states also require subsumption. 
One should observe that the latter application requires computing a complete set of substitutions.</p><p>In this paper we give an overview of the LIFT-UP approach.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="2">First-order Markov Decision Processes</head><p>A Markov decision process is a tuple (Z, A, P, R, C), where Z is a finite set of states, A is a finite set of actions, and P : Z × Z × A → [0, 1], written P(z |z, a), specifies transition probabilities. In particular, P(z |z, a) denotes the probability of ending up at state z given that the agent was in state z and action a was executed. R : Z → R is a real-valued reward function associating with each state z its immediate utility R(z). C : A → R is a real-valued cost function associating a cost C(a) with each action a. A sequential decision problem consists of a Markov decision process and is the problem of finding a policy π : Z → A that maximizes the total expected discounted reward received when executing the policy π over an infinite (or indefinite) horizon. A Markov decision process is said to be first-order if the expressions used to define Z, A and P are first-order. The value V π (z) of a state z with respect to the policy π is defined as</p><formula xml:id="formula_50">V π (z) = R(z) + C(π(z)) + γ z ∈Z P(z |z, π(z))V π (z ),</formula><p>where 0 ≤ γ ≤ 1 is a discount factor. We take γ equal to 1 for indefinite-horizon problems only, i. e. when a goal is reached the system enters an absorbing state in which no further rewards or costs are accrued. A value function V is said to be optimal if it satisfies</p><formula xml:id="formula_51">V * (z) = R(z) + max a∈A {C(a) + γ z ∈Z P(z |z, a)V * (z )} ,</formula><p>for each z ∈ Z; in this case the value function is usually denoted by V * (z). The optimal policy is extracted from the optimal value function.</p><p>We assume that planning problems meet the following requirements:</p><p>1. Each problem has a goal statement, identifying a set of absorbing goal states.</p><p>2. A positive reward is associated with each action ending in a goal state; otherwise it is 0.</p><p>3. A cost is associated with each action. 4. 
A "done" action is available in all states.</p><p>The "done" action can be used to end any further accumulation of reward. Together, these conditions ensure that an MDP model of a planning problem is a positive bounded model as described by <ref type="bibr" target="#b141">[Put94]</ref>. Such planning problems are also often called stochastic shortest path problems.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="3">Probabilistic Fluent Calculus</head><p>States, actions, transition probabilities, cost and reward function are specified in a probabilistic and sorted extension of the fluent calculus <ref type="bibr" target="#b134">[HS90,</ref><ref type="bibr" target="#b146">Thi98]</ref>.</p><p>Fluents and States Let Σ denote a set of function symbols containing the binary function symbol • and the nullary function symbol 1.</p><p>• is an AC1-symbol with 1 as unit element. Let Σ − = Σ \ {•, 1}. Non-variable Σ − -terms are called fluents. Let f (t 1 , . . . , t n ) be a fluent. The terms t i , 1 ≤ i ≤ n are called objects. A state is a finite set of ground fluents. Let D be the set of all states.</p><p>Fluent Terms and Abstract States Fluent terms are defined inductively as follows:</p><p>1 is a fluent term; each fluent is a fluent term; if G 1 and G 2 are fluent terms, then so is</p><formula xml:id="formula_52">G 1 • G 2 .</formula><p>Let F be the set of all fluent terms. We assume that each fluent term obeys the singularity condition: each fluent may occur at most once in a fluent term. Because of the latter, there is a bijection • M between ground fluent terms and states. Some care must be taken when instantiating a non-ground fluent term F by a substitution θ because F θ may violate the singularity condition. A substitution θ is allowed for fluent term F if F θ meets the singularity condition.</p><p>Abstract states are expressions of the form F or F • X, where F is a fluent term and X is a variable of sort fluent term. Let S denote the set of abstract states. Abstract states denote sets of states as defined by the mapping • I : S → 2 D : Let Z be an abstract state. 
Then  </p><formula xml:id="formula_53">1 = on(X 1 , a) • on(a, table), (b) Z 2 = on(X 2 , a) • on(a, table) • Y 2 , (c) Z 3 = on(X 3 , a) • on(a, table) • clear(X 3 ) and (d) Z 4 = on(X 4 , a) • on(a, table) • clear(X 4 ) • Y 4</formula><p>, where a is an object denoting a block, table is an object denoting a table, X 1 , X 2 , X 3 and X 4 are variables of sort object, Y 2 and Y 4 are variables of sort fluent term, on(X i , a), i = 1 . . . 4, is a fluent denoting that some block X i is on a and clear(X i ), i = 3, 4, is a fluent denoting that block X i is clear. This is illustrated in Figure <ref type="figure" target="#fig_4">1</ref>. In other words, abstract states are characterized by means of positive conditions that must hold in each ground instance thereof and, thus, they represent clusters of states. In this way, abstract states embody a form of state space abstraction, which is called first-order state abstraction.</p><p>As a running example, we consider problems taken from the colored Blocksworld scenario, which is an extension of the classical Blocksworld scenario in the sense that along with the unique identifier, each block is now assigned a specific color. Thus, a state description provides an arrangement of colors instead of an arrangement of blocks. For example, a state Z defined as a fluent term:</p><formula xml:id="formula_54">Z = red(X 0 ) • green(X 1 ) • blue(X 2 ) • red(X 3 ) • red(X 4 )• red(X 5 ) • green(X 6 ) • green(X 7 ) • T ower(X 0 , . . . , X 7 ) ,</formula><p>specifies a tower that is comprised of eigth colored blocks.</p><p>Subsumption Let Z 1 and Z 2 be abstract states. Then Z 1 is subsumed by Z 2 , in symbols Z 1 Z 2 , if there exists an allowed substitution θ such that</p><formula xml:id="formula_55">Z 2 θ = AC1 Z 1 . 
Intuitively, Z 1 is subsumed by Z 2 iff Z I 1 ⊆ Z I 2 .</formula><p>In the LIFT-UP system we are often concerned with the problem of finding a complete set of allowed substitutions solving the AC1-matching problem Z 2 θ = AC1 Z 1 .</p><p>For example, consider the abstract states mentioned in Figure <ref type="figure" target="#fig_4">1</ref>. Then,</p><formula xml:id="formula_56">Z 1 Z 2 with θ = {X 2 → X 1 , Y 2 → 1}, Z 3 Z 2 with θ = {X 2 → X 3 , Y 2 → clear(X 3 )}. However, Z 1 Z 3 and Z 3 Z 1 .</formula><p>Actions Let Σ a denote a set of action names, where Σ a ∩ Σ = ∅. An action space A is a set of expressions of the form (a(X 1 , . . . , X n ), C, E), where a ∈ Σ a , X i , 1 ≤ i ≤ n, are variables or constants, C ∈ F called precondition and E ∈ F called effect of the action a(X 1 , . . . , X n ). E.g., a pickup-action in the blockworld can be specified by</p><formula xml:id="formula_57">(pickup (X, Y ), on(X, Y ) • clear(X) • empty, holding(X) • clear(Y )),</formula><p>where empty denotes that the robot arm is empty and holding(X) that the block X is in the gripper. For simplicity, we will often supress parameters, preconditions and effects of an action (a(X 1 , . . . , X n ), C, E) and refer to it as a instead.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head>Nature's Choice and Probabilities</head><p>In analogy to the approach in <ref type="bibr" target="#b129">[BRP01]</ref> stochastic actions are decomposed into deterministic primitives under nature's control, referred to as nature's choices. It can be modelled with the help of a binary relation symbol choice as follows: Consider the action pickup (X, Y ):</p><formula xml:id="formula_58">choice (pickup (X, Y ), a) ↔ (a = pickupS (X, Y ) ∨ a = pickupF (X, Y )),</formula><p>where pickupS and pickupF define two nature's choices for action pickup , viz., that it succeeds or fails. For simplicity, we denote the set of nature's choices of an action a as Ch (a) := {a j |choice (a, a j )}.</p><p>For each of nature's choices a j associated with an action a we define the probability prob (a j , a, Z) denoting the probability with which one of nature's choices a j is chosen in a state Z. For example,</p><formula xml:id="formula_59">prob (pickupS (X, Y ), pickup (X, Y ), Z) = .75</formula><p>states that the probability for the successful execution of the pickup action in state Z is .75. We require that for each action the probabilities of all its nature's choices sum up to 1.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head>Rewards and Costs</head><p>Reward and cost functions are defined for abstract states using the unary relation symbols reward and cost. For example, we might want to give a reward of 500 to all states in which some block X is on block a and 0, otherwise:</p><formula xml:id="formula_60">reward (Z) = 500 ↔ Z (on(X, a), ∅), reward (Z) = 0 ↔ Z (on(X, a), ∅).</formula><p>In other words, the state space is divided into two abstract states depending on whether or not, a block X is on block a. Likewise, value functions can be specified with respect to the abstract states only. Action costs can be analogously defined. E. g., with</p><formula xml:id="formula_61">cost(pickup (X, Y )) = 3</formula><p>the execution of the pickup -action is penalized with 3.</p><p>Forward and Backward Application of Actions An action (a(X 1 , . . . , X n ), C, E) is forward applicable with θ to an abstract state Z ∈ S, denoted as forward (Z, a, θ), if (C • U )θ = AC1 Z, where U is a new variable of sort fluent term and θ is an allowed substitution. If applicable, then the action progresses to or yields the state (E • U )θ. In this case, (E • U )θ is called successor state of Z and denoted as succ(Z, a, θ). An action (a(X 1 , . . . , X n ), C, E) is backward applicable with θ to an abstract state Z ∈ S, denoted as backward (Z, a, θ), if (E • U )θ = AC1 Z, where U is a new variable of sort fluent term and θ is an allowed substitution. If applicable, then the action regresses to the state (C • U )θ. In this case, (C • U )θ is called predecessor state of Z and denoted as pred(Z, a, θ).</p><p>One should observe that the AC1-matching problems involved in the application of actions are subsumption problems, viz. Z (C • U ) and Z (E • U ). 
Moreover, in order to determine all possible successor or predecessor states of some state with respect to some action we have to compute complete sets of allowed substitutions solving the corresponding subsumption problems.</p><formula xml:id="formula_62">policyExpansion(π, S 0 , G) E := F := ∅ f rom := S 0 repeat to := S Z∈f rom S a j ∈Ch(a) {succ(Z, a j , θ)},</formula><p>where (a, θ) </p><formula xml:id="formula_63">:= π(Z) F := F ∪ (to − G) E := E ∪ f rom f rom := to ∩ G − E until (f rom = ∅) E := E ∪ F G := G ∪ F return (E, F, G) FOVI(E, A, prob, reward, cost, γ, V ) repeat V := V loop for each Z ∈ E loop for each a ∈ A loop for each θ such that forward (Z, a, θ) Q(Z, a, θ) := reward(Z) + cost(a)+ γ P a j ∈Ch(a) prob(a j , a, Z) • V (succ(Z, a j , θ)) end loop end loop V (Z) := max (a,θ) Q(Z, a, θ) end loop V := normalize(V ) r := V − V until stopping criterion π := extractP olicy(V ) return (V, π, r) FOLAO * (A, prob, reward, cost, γ, S 0 , h, ε) V := h G := ∅ For each Z ∈ S 0 , initialize π with an arbitrary action repeat (E, F, G) := policyExpansion(π, S 0 , G) (V, π, r) := FOVI(E, A, prob, reward, cost, γ, V ) until (F = ∅) and r ≤ ε return (π, V )</formula></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="4">LIFT-UP Algorithm</head><p>In order to solve first-order MDPs, we have developed a new algorithm that combines heuristic search and first-order state abstraction techniques.</p><p>Our algorithm, referred to as LIFT-UP, can be seen as a generalization of the symbolic LAO * algorithm by <ref type="bibr" target="#b131">[FH02]</ref>. Given an initial state, LIFT-UP uses an admissible heuristic to focus computation on the parts of the state space that are reachable from the initial state. Moreover, it specifies MDP components, value functions, policies, and admissible heuristics using a first-order language of the Probabilistic Fluent Calculus. This allows LIFT-UP to manipulate abstract states instead of individual states. The algorithm itself is presented in Figure <ref type="figure" target="#fig_5">2</ref>.</p><p>As symbolic LAO * , LIFT-UP has two phases that alternate until a complete solution is found, which is guaranteed to be optimal. First, it expands the best partial policy and evaluates the states on its fringe using an admissible heuristic function. Then it performs dynamic programming on the states visited by the best partial policy, to update their values and possibly revise the current best partial policy. We note that we focus on partial policies that map a subcollection of states into actions.</p><p>In the policy expansion step, we perform reachability analysis to find the set F of states that have not yet been expanded, but are reachable from the set S 0 of initial states by following the partial policy π. The set of states G contains states that have been expanded so far. By expanding a partial policy we mean that it will be defined for a larger set of states in the dynamic programming step.</p><p>In symbolic LAO * , reachability analysis is performed on propositional algebraic decision diagrams (ADDs). 
Therefore, an additional preprocessing of a first-order MDP is required at the outset of any solution attempt. This preprocessing involves propositionalization of the first-order structure of an MDP, viz., instantiation of the MDP components with all possible combinations of domain objects. In contrast, LIFT-UP relies on lifted first-order reasoning, that is, computations are kept on the first-order level avoiding propositionalization. In particular, action applicability check and computation of successors as well as predecessors are accomplished on abstract states directly.</p><p>In the dynamic programming step of LIFT-UP, we employ a modified first-order value iteration algorithm (FOVI) that computes the value only on those states which are reachable from the initial states. More precisely, we call FOVI on the set E of states that are visited by the best current partial policy. In this way, we improve the efficiency of the original FOVI algorithm by <ref type="bibr" target="#b135">[HS04]</ref> by using symbolic dynamic programming together with reachability analysis.</p><p>Given a FOMDP and a value function represented in PFC, FOVI returns the best partial value function V , the best partial policy π and the residual r. In order to update the values of the states Z in E, we assign the values from the current value function to the successors of Z. We compute successors with respect to all nature's choices a j . The residual r is computed as the absolute value of the largest difference between the current and the newly computed value functions V and V , respectively. We note that the newly computed value function V is taken in its normalized form, i.e., as a result of the normalize procedure that will be described in Section 4.2.1. 
Extraction of a best partial policy π is straightforward: One simply needs to extract the maximizing actions from the best partial value function V .</p><p>As with symbolic LAO * , LIFT-UP converges to an ε-optimal policy when three conditions are met: (1) its current policy does not have any unexpanded states, (2) the residual r is less than the predefined threshold ε, and (3) the value function is initialized with an admissible heuristic. The original convergence proofs for LAO * and symbolic LAO * by <ref type="bibr" target="#b136">[HZ01]</ref> carry over in a straightforward way to LIFT-UP.</p><p>When calling LIFT-UP, we initialize the value function with an admissible heuristic function h that focuses the search on a subset of reachable states. A simple way to create an admissible heuristic is to use dynamic programming to compute an approximate value function. Therefore, in order to obtain an admissible heuristic h in LIFT-UP, we perform several iterations of the original FOVI. We start the algorithm on an initial value function that is admissible. Since each step of FOVI preserves admissibility, the resulting value function is admissible as well. The initial value function assigns the goal reward to each state thereby overestimating the optimal value, since the goal reward is the maximal possible reward.</p><p>Since all computations in LIFT-UP are performed on abstract states instead of individual states, FOMDPs are solved avoiding explicit state and action enumeration and propositionalization. Lifted first-order reasoning leads to better performance of LIFT-UP in comparison to symbolic LAO * , as shown in Section 5.2. 
</p><formula xml:id="formula_64">Z 0 Z 2 Z 0 Z 1 Z 2 Z 1 Z 2 Z 0 Z 1 Z 2 a 1 1 a 1 2 a) E = { , , } F Z 1 ( ) Z 0 1 Z 2 Z FOVIA ,G = { from } to = { , } F = { } Z 1 Z 2 , G = { , } { , , } Z 1 Z 0 Z 3 a 1 2 Z 4 Z 3 Z 4 Z 1 1 a 2 Z 2 Z 3 G 2 a 2 1 a 1 { } Z 0 Z 5 Z 1 b) c) Z 5 Z 2 Z 0 Z 2 Z 3 Z 4 Z 5 , , , , { } Z 0 Z 2 Z 3 Z 4 Z 5 , , , , { } = from E = to = { } F } G = { , , , , G FOVIA ( ) E = to = { , } F = = from F Z 0 Z 2 Z 3 { } Z 3 F = { , , } Z 3 Z 4 Z 5 Z 2 Z 5 Z 4 { } Z 0 { } Z 2 ,</formula></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="4.1">Policy Expansion</head><p>We illustrate the policy expansion procedure in LIFT-UP by means of an example. Assume that we start from the initial state Z 0 and two nondeterministic actions a 1 and a 2 are applicable in Z 0 , each having two outcomes a 1 1 , a 1 2 and a 2 1 , a 2 2 , respectively. Without loss of generality, we assume that the current best policy π chooses a 1 as an optimal action at state Z 0 . We construct the successors Z 1 and Z 2 of Z 0 with respect to both outcomes a 1 1 and a 1 2 of the action a 1 . The fringe set F as well as the set G of states expanded so far contain the states Z 1 and Z 2 only, whereas, the set E of states visited by the best current partial policy gets the state Z 0 in addition. See Figure <ref type="figure" target="#fig_11">3a</ref>. In the next step, FOVI is performed on the set E. We assume that the values have been updated in such a way that a 2 becomes an optimal action in Z 0 . Thus, the successors of Z 0 have to be recomputed with respect to the optimal action a 2 . See Figure <ref type="figure" target="#fig_11">3b</ref>.</p><p>One should observe that one of the a 2 -successors of Z 0 , namely Z 2 , is an element of the set G and thus, it has been contained already in the fringe F during the previous expansion step. Hence, the state Z 2 should be expanded and its value recomputed. This is shown in Figure <ref type="figure" target="#fig_11">3c</ref>, where states Z 4 and Z 5 are a 1 -successors of Z 2 , under assumption that a 1 is an optimal action in Z 2 . As a result, the fringe set F contains the newly discovered states Z 3 , Z 4 and Z 5 and we perform FOVI on E = {Z 0 , Z 2 , Z 3 , Z 4 , Z 5 }. The state Z 1 is not contained in E, because it does not belong to the best current partial policy, and the dynamic programming step is performed only on the states that were visited by the best current partial policy. </p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="4.2">First-order Value Iteration</head><p>The first-order value iteration algorithm (FOVI) produces a first-order representation of the optimal value function and policy by exploiting the logical structure of a first-order MDP. Thus, FOVI can be seen as a first-order counterpart of the classical value iteration algorithm by <ref type="bibr" target="#b128">[Bel57]</ref>.</p><p>In LIFT-UP, the first-order value iteration algorithm serves two purposes: First, we perform several iterations of FOVI in order to create an admissible heuristic h in LIFT-UP. Second, in the dynamic programming step of LIFT-UP, we apply FOVI on the states visited by the best partial policy in order to update their values and possibly revise the current best partial policy.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="4.2.1">Normalization</head><p>It was already mentioned by several authors that value iteration adds a dramatic computational overhead to a solution technique for first-order MDPs if no care about redundant computations is taken <ref type="bibr" target="#b129">[BRP01,</ref><ref type="bibr" target="#b135">HS04]</ref>.</p><p>Recently, there have been proposed an automated normalization procedure that, given a state space, delivers an equivalent one that contains no redundancy <ref type="bibr" target="#b135">[HS04]</ref>. This procedure, referred to as normalize in the LIFT-UP algorithm, is always called before the value function is transmitted to the next iteration step, thereby preventing the propagation of redundancy to the next computation steps. The technique employs the notion of the subsumption relation defined in Section 3. Informally, given two abstract states Z 1 and Z 2 such that Z 1 Z 2 and the values associated to states are identical, Z 1 can be easily removed from the state space because it contains redundant information.</p><p>Table <ref type="table" target="#tab_4">1</ref> illustrates the importance of the normalization algorithm by providing some representative timing results for the first ten iterations of the first-order value iteration. The experiments were carried out on the problem taken from the colored Blocksworld scenario consisting of ten blocks. Even on such a relatively simple problem FOVI with the normalization switched off does not scale beyond the sixth iteration.</p><p>The results in Table <ref type="table" target="#tab_4">1</ref> demonstrate that the normalization during some iteration of FOVI dramatically shrinks the computational effort during the next iterations. The columns labelled S update and S norm show the size of the state space after performing the value updates and the normalization, respectively. 
For example, the normalization factor, i.e., the ratio between the number S update of states obtained after performing one update step and the number S norm of states obtained after performing the normalization step, at the seventh iteration is 11.6. This means that more than ninety percent of the state space contained redundant information. The fourth and fifth columns in Table <ref type="table" target="#tab_4">1</ref> contain the time Update and Norm spent on performing value updates and on the normalization, respectively. The total runtime Runtime, when the normalization is switched on, is given in the sixth column. The seventh column labelled Runtime w/o norm depicts the total runtime of FOVI when the normalization is switched off. If we would sum up all values in the seventh column and the values in the sixth column up to the sixth iteration inclusively, subtract the latter from the former and divide the result by the total time Norm needed for performing normalization during the first six iterations, then we would obtain the normalization gain of about three orders of magnitude.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="5">The Planning System FluCaP</head><p>To evaluate the LIFT-UP approach we have developed a domain-dependent implementation called FluCaP. It can solve probabilistic Blocksworld problems as they appeared, for example, in the colored Blocksworld domain of the 2004 International Planning Competition.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="5.1">Domain-dependent Optimizations</head><p>So far, we have presented a general theory of LIFT-UP for finding solutions in uncertain planning environments which are represented as first-order MDPs. However, several domain-driven optimizations or relaxations have been posed on the general theory. As a result, FluCaP has demonstrated a competitive computational behaviour.</p><p>Action Applicability Since in the Blocksworld domain, all states were fully specified, abstract states were described as fluent terms only. This allows to relax the forward and backward action applicability conditions. Since the cases are symmetric, we will concentrate on the forward action applicability condition that was initially defined as: An action (a(X 1 , . . . , X n ), C, E) is forward applicable with θ to an abstract state Z ∈ S, denoted as forward (Z, a, θ), if (C • U )θ = AC1 Z, where U is a new variable of sort fluent term and θ is an allowed substitution. Since C and Z are fluent terms under the singularity condition, the aforementioned AC1-matching problem can be transformed into the θ-subsumption problem <ref type="bibr">[Rob65]</ref>.</p><p>Moreover, we optimize the obtained θ-subsumption problem further. Since a state description in a colored Blocksworld represents a number of towers, we compare towers of blocks and their color distributions instead of matching respective fluent terms. The experiments have shown that it is much faster to manipulate with towers rather than with fluent terms. For example, assume that an action precondition contains a fluent clear(X). Let a state describe three towers of blocks. By inspecting the uppermost blocks in the towers, we conclude that there are only three blocks, which satisfy the precondition. 
It would be interesting to check whether this optimization technique can be successfully applied in other planning domains as well.</p><p>Meanwhile, in Section 6, we present some results towards efficient domain-independent solution methods for the θ-subsumption problem.</p><p>Normalization A similar situation occurs in the case of normalization which relies on the subsumption relation defined in Section 3. The AC1-matching problem underlying the subsumption relation reduces to the θ-subsumption problem.</p><p>Again, it is much faster to operate on towers rather than on fluent terms. For example, assume that one state describes two towers of four and three blocks, respectively. Another state also describes two towers but of five and two blocks, respectively. In order to decide whether one state subsumes another one, we try to match the corresponding towers and their color distributions. As experiments have shown, this optimization speeds up the normalization immensely.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="5.2">Experimental Evaluation</head><p>We demonstrate the advantages of combining the heuristic search together with firstorder state abstraction on a FluCaP system, that has successfully entered the domaindependent track of the probabilistic part of the 2004 International Planning Competition (IPC'2004). The experimental results were all obtained using RedHat Linux running on a 3.4GHz Pentium IV machine with 3GB of RAM.</p><p>In Table <ref type="table" target="#tab_5">2</ref>, we present the performance comparison of FluCaP together with symbolic LAO * on examples taken from the colored Blocksworld (BW) scenario. Our main objective was to investigate whether first-order state abstraction using logic could improve the computational behaviour of a planning system for solving FOMDPs. The colored BW problems were our main interest since they were the only ones represented in first-order terms and hence the only ones that allowed us to make use of the first-order state abstraction.</p><p>At the outset of solving a colored BW problem, symbolic LAO * starts by propositionalizing its components, namely, the goal statement and actions. Only after that, the abstraction using propositional ADDs is applied. In contrast, FluCaP performs first-order abstraction on a colored BW problem directly, avoiding unnecessary grounding. In the following, we show how an abstraction technique affects the computation of a heuristic function. To create an admissible heuristic, FluCaP performs twenty iterations of FOVI and symbolic LAO * performs twenty iterations of an approximate value iteration algorithm similar to APRICODD by <ref type="bibr" target="#b144">[SAHB00]</ref>. The columns labelled H.time and NAS show the time needed for computing a heuristic function and the number of abstract states it covers, respectively. 
In comparison to FluCaP, symbolic LAO * needs to evaluate fewer abstract states in the heuristic function but takes considerably more time. One can conclude that abstract states in symbolic LAO * enjoy more complex structure than those in FluCaP.</p><p>We note that, in comparison to FOVI, FluCaP restricts the value iteration to a smaller state space. Intuitively, the value function, which is delivered by FOVI, covers a larger state space, because the time that is allocated for the heuristic search in FluCaP is now used for performing additional iterations in FOVI. The results in the column labelled % justify that the harder the problem is (that is, the more colors it contains), the higher the percentage of runtime spent on normalization. Almost on all test problems, the effort spent on normalization takes three percent of the total runtime on average. In order to compare the heuristic accuracy, we present in the column labelled NGS the number of ground states which the heuristic assigns non-zero values to. One can see that the heuristics returned by FluCaP and symbolic LAO * have similar accuracy, but FluCaP takes much less time to compute them. This reflects the advantage of the plain first-order abstraction in comparison to the marriage of propositionalization with abstraction using propositional ADDs. In some examples, we gain several orders of magnitude in H.time.</p><p>The column labelled Total time presents the time needed to solve a problem. During this time, a planner must execute 30 runs from an initial state to a goal state. A one-hour block is allocated for each problem. We note that, in comparison to FluCaP, the time required by heuristic search in symbolic LAO * (i.e., difference between Total time and H.time) grows considerably faster in the size of the problem. 
This reflects the potential of employing first-order abstraction instead of abstraction based on propositional ADDs during heuristic search.</p><p>The average reward obtained over 30 runs, shown in column Total av. reward, is the planner's evaluation score. The reward value close to 500 (which is the maximum possible reward) simply indicates that a planner found a reasonably good policy. Each time the number of blocks B increases by 1, the running time for symbolic LAO * increases roughly 10 times. Thus, it could not scale to problems having more than seven blocks. This is in contrast to FluCaP which could solve problems of seventeen blocks. We  where the cells n/a denote the fact that a planner did not deliver a solution within the time limit.</p><p>note that the number of colors C in a problem affects the efficiency of an abstraction technique. In FluCaP, as C decreases, the abstraction rate increases which, in turn, is reflected by the dramatic decrease in runtime. The opposite holds for symbolic LAO * .</p><p>In addition, we compare FluCaP with two variants. The first one, denoted as FOVI, performs no heuristic search at all, but rather, employs FOVI to compute the εoptimal total value function from which a policy is extracted. The second one, denoted as FluCaP -, performs 'trivial' heuristic search starting with an initial value function as an admissible heuristic. As expected, FluCaP that combines heuristic search and FOVI demonstrates an advantage over plain FOVI and trivial heuristic search. These results illustrate the significance of heuristic search in general (FluCaP vs. FOVI) and the importance of heuristic accuracy, in particular (FluCaP vs. FluCaP -). FOVI and FluCaP - do not scale to problems with more than seven blocks.</p><p>Table <ref type="table" target="#tab_6">3</ref> presents the performance results of FluCaP on larger instances of one-color BW problems with the number of blocks varying from twenty to thirty four. 
We believe that FluCaP does not scale to problems of larger size because the implementation is not yet well optimized. In general, we believe that the FluCaP system will not be as sensitive to the size of a problem as propositional planners are.</p><p>Our experiments were targeted at the one-color problems only because they are, on the one hand, the simplest ones for us and, on the other hand, the bottleneck for propositional planners. The structure of one-color problems allows us to apply first-order state abstraction to its full power. For example, for a 34-blocks problem FluCaP operates on about 3.3 thousand abstract states that explode to 9.6 × 10^41 individual states after propositionalization. A propositional planner must be highly optimized in order to cope with this non-trivial state space.</p><p>We note that additional colors in larger instances (more than 20 blocks) of BW problems cause a dramatic increase in computational time, so we consider these problems as being unsolved. One should also observe that the number of abstract states NAS increases with the number of blocks non-monotonically because the problems are generated randomly. For example, the 30-blocks problem happens to be harder than the 34-blocks one. Finally, we note that all results that appear in Tables 2 and 3 were obtained by using the new version of the evaluation software that does not rely on propositionalization in contrast to the initial version that was used during the competition.</p><p>The competition domains and results are available in <ref type="bibr" target="#b147">[YLWA05]</ref>.</p><p>6 Domain-independent Methods for θ-subsumption Given two fluent terms Z 1 and Z 2 under singularity condition, Z 1 θ-subsumes Z 2 , written Z 1 AC1 θ Z 2 , iff there exists an allowed substitution θ such that (Z 1 • U )θ = AC1 Z 2 . Initially, θ-subsumption was defined on clauses. Given two clauses C and D, C θ-subsumes D iff there exists a substitution θ such that Cθ ⊆ D <ref type="bibr">[Rob65]</ref>. 
In general, θ-subsumption is NP-complete <ref type="bibr">[KN86]</ref>. In the domain-dependent implementation of the LIFT-UP approach, which was described in the previous section, we have employed domain-driven optimization techniques that have allowed us to reduce the complexity of θ-subsumption. This section is devoted to efficient domain-independent solution methods for θ-subsumption which cope with its NP-completeness.</p><p>One approach to cope with the NP-completeness of θ-subsumption is deterministic subsumption. A state is said to be determinate if there is an ordering of fluents, such that in each step there is a fluent which has exactly one match that is consistent with the previously matched fluents <ref type="bibr" target="#b137">[KL94]</ref>. However, in practice, there may be only few fluents, or none at all, that can be matched deterministically. Recently, in <ref type="bibr" target="#b145">[SHW96]</ref>, another approach was developed, which we refer to as literal context, LitCon, for short, to cope with the complexity of θ-subsumption. The authors propose to reduce the number of matching candidates for each fluent by using the contextual information. The method is based on the idea that fluents may only be matched to those fluents that possess the same relations up to an arbitrary depth in a clause. As a result, a certain superset of determinate states can be tested for subsumption in polynomial time.</p><p>Unfortunately, as was shown in <ref type="bibr" target="#b139">[KRS06]</ref>, LitCon does not scale very well up to large depth. Because in some planning problems, the size of state descriptions can be relatively large, it might be necessary to compute the contextual information for large values of the depth parameter. Therefore, we are strongly interested in a technique that scales better than LitCon. 
In this section, we present an approach, referred to as object context, ObjCon, for short, which demonstrates better computational behaviour than LitCon. Based on the idea of ObjCon, we develop a new θ-subsumption algorithm and compare it with the LitCon-based approach.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="6.1">Object Context</head><p>In general, a fluent f in a state Z 1 can be matched with several fluents in a state Z 2 , that are referred to as matching candidates of f . LitCon is based on the idea that fluents in Z 1 can be only matched to those fluents in Z 2 , the context of which include the context of the fluents in Z 1 <ref type="bibr" target="#b145">[SHW96]</ref>. The context is given by occurrences of identical objects (variables Vars (Z) and constants Const (Z)) or chains of such occurrences and is defined up to some fixed depth. In effect, matching candidates that do not meet the above context condition can be effortlessly pruned. In most cases, such pruning results in deterministic subsumption, thereby considerably extending the tractable class of states.</p><p>The computation of the context itself is dramatically affected by the depth parameter: The larger the depth is, the longer the chains of objects' occurrences are, and thus, more effort should be devoted to build them. Unfortunately, LitCon does not scale very well up to large depth <ref type="bibr" target="#b139">[KRS06]</ref>. For example, consider a state</p><formula xml:id="formula_65">Z = on(X, Y ) • on(Y, table) • r(X) • b(Y ) • h(X) • h(Y ) • w(X) • l(Y )</formula><p>that can be informally read as: A block X is on the block Y which is on the table, and both blocks enjoy various properties, like color (red r or blue b) or weight (heavy h or light l), they can be wet w. Z contains eight fluents and only three objects. In LitCon, the context should be computed for each of eight fluents in order to keep track of all occurrences of identical objects. What if we were to compute the context for each object instead? 
In our running example, we would need to perform computations only three times, in this case.</p><p>Herein, we propose a more efficient approach, referred to as ObjCon, for computing the contextual information and incorporate it into a new context-based θ-subsumption algorithm. More formally, we build the object occurrence graph G Z = (V, E, ) for a state Z, where vertices are objects of Z, denoted as Obj (Z), and edges </p><formula xml:id="formula_66">E = {(o 1 , π 1 , f, π 2 , o 2 )| Z contains f (</formula><formula xml:id="formula_67">π 1 1 •f 1 •π 1 2 −→ (o 1 ) π 2 1 •f 2 •π 2 2 −→ . . . π d 1 •f d •π d 2 −→ (o d ) ∈ ObjCon(o, Z, d) iff o π 1 1 •f 1 •π 1 2 −→ o 1 π 2 1 •f 2 •π 2 2 −→ . . . π d 1 •f d •π d 2 −→ o d is a path in G Z of</formula><p>length d starting at o. In our running example, ObjCon(X, Z, 1) of depth 1 of the variable X in Z contains one chain {{r, h, w}</p><formula xml:id="formula_68">1•on•2 −→ {b, h, l}}.</formula><p>Following the ideas of <ref type="bibr" target="#b145">[SHW96]</ref>, we define the embedding of object contexts for states Z 1 and Z 2 , which serves as a pruning condition for reducing the space of matching candidates for Z 1 and Z 2 . Briefly, let</p><formula xml:id="formula_69">OC 1 =ObjCon(o 1 , Z 1 , d), OC 2 =ObjCon(o 2 , Z 2 , d).</formula><p>Then OC 1 is embedded in OC 2 , written OC 1 OC 2 , iff for every chain of labels in OC 1 there exists a chain of labels in OC 2 which preserves the positions of objects in fluents and the labels for each object in OC 1 are included in the respective labels in OC 2 up to the depth d. Finally, if ObjCon(X, Z 1 , d) ObjCon(o, Z 2 , d) then there exists no θ such that (Z 1 • U )µθ = AC1 Z 2 , where µ = {X → o} and U is a new variable of sort fluent term. In other words, a variable X in Z 1 cannot be matched against an object o in Z 2 within a globally consistent match, if the variable's context cannot be embedded in the object's context. 
Therefore, the substitutions that meet the above condition can be effortlessly pruned from the search space. For any context depth d &gt; 0, the context inclusion is an additional condition that reduces the number of candidates, and hence there exists more often at most one remaining matching candidate.</p><p>Based on the idea of the object context, we describe a new θ-subsumption algorithm in Algorithm 1. Please note that this algorithm provides a complete set of all allowed substitutions which is used later on for determining the set of all possible successors or predecessors of some state with respect to some action. Due to the lack of space, we Algorithm 1: ObjCon-alltheta.</p><p>omit the algorithm for computing all cliques in a substitution graph. However, it can be found in <ref type="bibr" target="#b139">[KRS06]</ref>.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="6.2">Experimental Evaluation</head><p>Figure <ref type="figure" target="#fig_13">4</ref> depicts the comparison timing results between the LitCon-based subsumption reasoner, referred to as AllTheta, and its ObjCon-based opponent, referred to as FluCaP. The results were obtained using RedHat Linux running on a 2.4GHz Pentium IV machine with 2GB of RAM. We demonstrate the advantages of exploiting the object-based context information on problems that stem from the colored Blocksworld and Pipesworld planning scenarios. The Pipesworld domain models the flow of oil-derivative liquids through pipeline segments connecting areas, and is inspired by applications in the oil industry. Liquids are modeled as batches of a certain unit size. A segment must always contain a certain number of batches (i.e., it must always be full). Batches can be pushed into pipelines from either side, leading to the batch at the opposite end "falling" into the incident area. Batches have associated product types, and batches of certain types may never be adjacent to each other in a pipeline. Moreover, areas may have constraints on how many batches of a certain product type they can hold.</p><p>For each problem, 1000 subsumption tests were performed. A time limit of 100 minutes was allocated. The results show that FluCaP scales better than AllTheta. This is best observed on the problems of fourteen, twenty, and thirty blocks. As empirical results demonstrate, the optimal value of the depth parameter for Blocksworld and Pipesworld is four.</p><p>The main reason for the computational gain of FluCaP is that it is less sensitive to the growth of the depth parameter. Under the condition that the number of objects in a state is strictly less than the number of fluents and other parameters are fixed, the amount of object-based context information is strictly less than the amount of the literal-based context information. 
Moreover, on the Pipesworld problems, FluCaP requires two orders of magnitude less time than AllTheta. </p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="7">Related Work</head><p>We follow the symbolic dynamic programming (SDP) approach within Situation Calculus (SC) of <ref type="bibr" target="#b129">[BRP01]</ref> in using first-order state abstraction for FOMDPs. In the course of first-order value iteration, a state space may contain redundant abstract states that dramatically affect the algorithm's efficiency. In order to achieve computational savings, normalization must be performed to remove this redundancy. However, in the original work by <ref type="bibr" target="#b129">[BRP01]</ref> this was done by hand. To the best of our knowledge, the preliminary implementation of the SDP approach within SC uses human-provided rewrite rules for logical simplification. In contrast, <ref type="bibr" target="#b135">[HS04]</ref> have developed an automated normalization procedure for FOVI that brings a computational gain of several orders of magnitude. Another crucial difference is that our algorithm uses heuristic search to limit the number of states for which a policy is computed.</p><p>The ReBel algorithm by <ref type="bibr" target="#b140">[KvOdR04]</ref> relates to LIFT-UP in that it also uses a representation language that is simpler than Situation Calculus. This feature makes the state space normalization computationally feasible.</p><p>All the above algorithms can be classified as deductive approaches to solving FOMDPs. They can be characterized by the following features: (1) they are model-based, (2) they aim at exact solutions, and (3) logical reasoning methods are used to compute abstractions. We should note that FOVI aims at an exact solution for a FOMDP, whereas LIFT-UP, due to the heuristic search that avoids evaluating all states, seeks an approximate solution. 
Therefore, it would be more appropriate to classify LIFT-UP as an approximate deductive approach to FOMDPs.</p><p>In another vein, there is some research on developing inductive approaches to solving FOMDPs, e.g., by <ref type="bibr" target="#b132">[FYG03]</ref>. The authors propose the approximate policy iteration (API) algorithm, where they replace the use of cost-function approximations as policy representations in API with direct, compact state-action mappings, and use a standard relational learner to learn these mappings. A recent approach by <ref type="bibr" target="#b133">[GT04]</ref> proposes an inductive policy construction algorithm that strikes a middle-ground between deductive and inductive techniques.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="8">Conclusions</head><p>We have proposed a new approach that combines heuristic search and first-order state abstraction for solving first-order MDPs more efficiently. In contrast to existing systems, which start with propositionalizing the decision problem at the outset of any solution attempt, we perform lifted reasoning on the first-order structure of an MDP directly. However, there is plenty remaining to be done. For example, we are interested in the question of to what extent the optimization techniques applied in modern propositional planners can be combined with first-order state abstraction.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="1">Introduction</head><p>In the last decade, the propositional satisfiability (SAT) has become one of the most interesting research problems within artificial intelligence (AI). This tendency can be seen through the development of a number of powerful SAT solvers, based on either systematic search or stochastic local search (SLS), for solving various hard combinatorial search problems such as automatic deduction, hardware and software verification, planning, scheduling, and FPGA routing.</p><p>The power of contemporary systematic SAT solvers derives not only from the underlying Davis-Putnam-Logemann-Loveland (DPLL) algorithm but also from enhancements aimed at increasing the amount of unit propagation, improving the choices of variables for splitting or making backtracking more intelligent. Two of the most important such enhancements are conflict driven clause learning (CDCL), made practicable on a large scale by the watched literal technique, and one-step lookahead. These two tend to exclude each other: the most successful solvers generally incorporate one or the other but not both. The benefits they bring are rather different too, as is clear from the results of recent SAT competitions. For problems in the "industrial" category, CDCL, as implemented in MINISAT <ref type="bibr" target="#b160">[ES03,</ref><ref type="bibr" target="#b171">SE05]</ref>, siege <ref type="bibr" target="#b170">[Rya04]</ref> and zChaff [MMZ + 01, ZMMM01] is currently the method of choice. On random problems, however, lookahead-based solvers such as Dew Satz <ref type="bibr" target="#b151">[AS05]</ref>, Kcnfs <ref type="bibr" target="#b157">[DD01]</ref> and March dl <ref type="bibr" target="#b162">[HvM06]</ref> perform better.</p><p>Lookahead, of course, is expensive at every choice node, while clause learning is expensive only at backtrack points. 
Since half of the nodes (plus one) in any binary tree are leaves, this difference is significant for lookahead-based solvers which process nodes relatively slowly and gain, if at all, by reducing the search tree size. Looking ahead is an investment in at-node processing which can pay off only if it results in more informed choices with an impact on the total number of nodes visited. Where learnt clauses prune the tree at least as effectively as complex choice heuristics, CDCL must win. This seems to be the case in many classes of highly structured problems such as the "industrial" ones in the SAT competitions. We have no clearer definition than anyone else of "structure", but are interested to find ways in which lookahead-based solvers might detect and exploit it as well as the clause learners do.</p><p>A noteworthy feature of many recent systems is a preprocessing phase, often using inference by some variant of resolution, to transform problems prior to the search. One suggestion we wish to make and explore is that such transformations may help lookahead-based solvers to discover useful structure. That is, much of the reasoning done by nogood inference might be done cheaply "up-front", provided that the subsequent variable choice heuristics are good enough to exploit it. In what follows, we are therefore concerned mainly with the effects of preprocessing, including those of using multiple preprocessors in series, on the performance of a lookahead-based solver Dew Satz on problems where it does poorly in comparison with a clause learning solver (MINISAT). We also include some results and remarks on benefits to be gained in the opposite direction, where MINISAT is helped by preprocessing to attack problems to which Dew Satz is more suited.</p><p>In this paper we propose a multiple preprocessing technique to boost the performance of systematic SAT solvers. 
The motivation for applying multiple preprocessors prior to the systematic search process is clear: each preprocessor uses a different strategy with the objective of simplifying clause sets derived from real-world problems that exhibit a great deal of structure such as symmetries, variable dependencies, clustering, and the like. Our initial observation showed that each strategy works well for simplifying the structure of some problems, most of the time, from hard to easy. When a problem exhibits different kinds of structure, then a single preprocessor has difficulty simplifying the structures. In this case, we need to run multiple preprocessors one after the other.</p><p>We report performance statistics for the two solvers, Dew Satz and MINISAT, with and without combinations of five (and for one problem set, six) of the best contemporary SAT preprocessors when solving parity, planning, bounded model checking and FPGA routing benchmark problems from SATLIB and the recent SAT competitions. One finding is that the use of multiple preprocessors one after the other can be much more effective than using any one of them alone, but that the order in which they are applied is significant. We intend our results to be particularly useful to those implementing new preprocessors and solvers.</p><p>The rest of the paper is organized as follows: section 2 addresses the related work. In sections 3 and 4, we briefly describe the preprocessors and solvers examined in our study. The main part of the paper consists of experimental results, and we conclude with a few remarks and suggestions.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="2">Related Work</head><p>Resolution-based SAT preprocessors for CNF formula simplification have a dramatic impact on the performance of even the most efficient SAT solvers on many benchmark problems <ref type="bibr" target="#b165">[LMS01]</ref>. The simplest preprocessor consists of just computing length-bounded resolvents and deleting duplicate and subsumed clauses, as well as tautologies and any duplicate literals in a clause.</p><p>There are two most directly related works. The first one is that of Anbulagan et al. <ref type="bibr" target="#b150">[APSS06]</ref> which examined the integration of five resolution-based preprocessors alone or the combination of them with stochastic local search (SLS) solvers. Their experimental results show that SLS solvers benefit from the presence of resolution-based preprocessing and multiple preprocessing techniques. The second one is that of Lynce and Marques-Silva <ref type="bibr" target="#b165">[LMS01]</ref>. They only evaluated empirically the impact of some preprocessors developed before 2001 including 3-Resolution, without considering multiple preprocessing, on the performance of systematic SAT solvers. In recent years, many other preprocessors, which are sophisticated, have been applied to modern propositional reasoners. Among them are 2-SIMPLIFY <ref type="bibr" target="#b154">[Bra01]</ref>, the preprocessor in Lsat <ref type="bibr" target="#b167">[OGMS02]</ref> for recovering and exploiting Boolean gates, HyPre <ref type="bibr" target="#b153">[Bac02,</ref><ref type="bibr" target="#b156">BW04]</ref>, Shatter <ref type="bibr" target="#b152">[ASM03]</ref> for dealing with symmetry structure, NiVER <ref type="bibr" target="#b173">[SP05]</ref> and SatELite <ref type="bibr" target="#b159">[EB05]</ref>. We consider some of these preprocessors plus 3-Resolution in our study.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="3">SAT Preprocessors</head><p>We describe briefly the six SAT preprocessors used in the experiments. The first five are all based on resolution and its variants such as hyper-resolution. Resolution [Qui55, DP60, Rob65] itself is widely used as a rule of inference in first order automated deduction, where the clauses tend to be few in number and contain few literals, and where the reasoning is primarily driven by unification. As a procedure for propositional reasoning, however, resolution is rarely used on its own because in practice it has not been found to lead to efficient algorithms. The sixth preprocessor is a special-purpose tool for symmetry detection, which is important for one problem class in the experiments. </p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="3.1">3-Resolution</head></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="3.2">2-SIMPLIFY</head><p>2-SIMPLIFY <ref type="bibr" target="#b154">[Bra01]</ref> constructs an implication graph from all binary clauses in the problem. Where there is an implication chain from a literal X to X, X can be deduced as a unit which can be propagated. The method also collapses strongly connected components, propagates shared implications, or literals implied in the graph by every literal in a clause, and removes some redundant binary clauses. Experimental results <ref type="bibr" target="#b154">[Bra01,</ref><ref type="bibr" target="#b155">Bra04]</ref> show that systematic search benefits markedly from 2-SIMPLIFY on a wide range of problems.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="3.3">HyPre</head><p>HyPre [BW04] also reasons with binary clauses, but incorporates full hyper-resolution, making it more powerful than 2-SIMPLIFY. In addition, unit reduction and equality reduction are incrementally applied to infer more binary clauses. It can be costly in terms of time, but since it is based explicitly on hyper-resolution it avoids the space explosion of computing a full transitive closure. HyPre has been used in the SAT solver, 2CLS+EQ <ref type="bibr" target="#b153">[Bac02]</ref>, and we consider it a very promising addition to many other solvers. It is generally useful for exploiting implicational structure in large problems.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="3.4">NiVER</head><p>Variable Elimination Resolution (VER) is an ancient inference method consisting of performing all resolutions on a chosen variable and then deleting all clauses in which that variable occurs, leaving just the resolvents. It is easy to see that this is a complete decision procedure for SAT problems, and almost as easy to see that it is not practicable because of exponential space complexity. Recently, Subbarayan and Pradhan <ref type="bibr" target="#b173">[SP05]</ref> proposed NiVER (Non increasing VER) which restricts the variable elimination to the case in which there is no increase in the number of literals after elimination. This shows promise as a SAT preprocessor, improving the performance of a number of solvers <ref type="bibr" target="#b173">[SP05]</ref>.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="3.5">SatELite</head><p>Eén and Biere <ref type="bibr" target="#b159">[EB05]</ref> proposed the SatELite preprocessor, which extends NiVER with a rule of Variable Elimination by Substitution. Several additions including subsumption detection and improved data structures further improved performance in both space and time. SatELite was combined with MINISAT to form SatELiteGTI, the system which dominated the SAT2005 competition on the crafted and industrial problem categories. Since we use MINISAT for our experiments, it is obvious that SatELite should be one of the preprocessors we consider.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="3.6">Shatter</head><p>It is clear that eliminating symmetries is essential to solving realistic instances of many problems. None of the resolution-based preprocessors does this, so for problems that involve a high degree of symmetry we added Shatter <ref type="bibr" target="#b148">[AMS03]</ref> which detects symmetries and adds symmetry-breaking clauses. These always increase the size of the clause set and for satisfiable problems they remove some of the solutions, but they typically make the problem easier by pruning away isomorphic copies of parts of the search space.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="4">SAT Solvers</head><p>As noted in Section 1, we concentrate on just two solvers: MINISAT, which relies on clause learning, and Dew Satz, which uses lookahead.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="4.1">MINISAT</head><p>Sörensson and Eén <ref type="bibr" target="#b160">[ES03,</ref><ref type="bibr" target="#b171">SE05]</ref> released the MINISAT solver in 2005. Its design is based on Chaff, particularly in that it learns nogoods or "conflict clauses" and accesses them during the search by means of two watched literals in each clause. MINISAT is quite small (a few hundred lines of code) and easy to use either alone or as a module of a larger system. Its speed in comparison with similar solvers such as zChaff comes from a series of innovations of which the most important are an activity-decay schedule which proceeds by frequent small reductions rather than occasional large ones, and an inference rule for reducing the size of conflict clauses by introducing a restricted subsumption test. The cited paper contains a brief but informative description of these ideas.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="4.2">Dew Satz</head><p>The solver Dew Satz <ref type="bibr" target="#b151">[AS05]</ref> is a recent version of the Satz solver <ref type="bibr" target="#b163">[LA97]</ref>. Like its parent Satz, it gains efficiency by a restricted one-step lookahead scheme which rates some of the neighbouring variables every time a choice must be made for branching purposes. Its lookahead is more sophisticated than the original one of Satz, adding a DEW (dynamic equality weighting) heuristic to deal with equalities. This enables the variable selection process to avoid duplicating the work of weighting variables detected to be equivalent to those already examined. Thus, while the solver has no special inference mechanism for propositional equalities, it does deal tolerably well with problems containing them.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="5">Experimental Results</head><p>We present results on four benchmark problem sets chosen to present challenges for one or other or both of the SAT solvers. The experiments were conducted on a cluster of 16 AMD Athlon 64 processors running at 2 GHz with 2 GB of RAM. Ptime in the tables represents preprocessing time, while Stime represents solvers runtime without including Ptime. The timebound of Stime is 15,000 seconds per problem instance. It is worth noting that in our study the results of SatELiteGTI, the solver which dominated the SAT2005 competition on the crafted and industrial problem categories, are represented by the results of SatELite+MINISAT.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="5.1">The 32-bit Parity Problem</head><p>The 32-bit parity problem was listed by Selman et al. <ref type="bibr" target="#b172">[SKM97]</ref> as one of ten challenges for research on satisfiability testing. The ten instances of the problem are satisfiable. The first response to this challenge was by Warners and van Maaren <ref type="bibr" target="#b174">[WvM98]</ref> who solved the par32-*-c problem (5 instances) using a special-purpose preprocessor to deal with equivalency conditions. Two years later, Li <ref type="bibr" target="#b164">[Li00]</ref> solved the instances using equivalency reasoning. Subsequently, Ostrowski et al. <ref type="bibr" target="#b167">[OGMS02]</ref> solved the problems with Lsat, which performs a preprocessing step to recover and exploit the logical gates of a given CNF formula and then applies DPLL with a Jeroslow-Wang branching rule. The challenge has now been met convincingly by Heule et al. <ref type="bibr" target="#b161">[HvM04]</ref> with their March eq solver, which combines equivalency reasoning in a preprocessor with a lookahead-based DPLL and which solves all of the par32* instances in seconds. Dew Satz is one of the few solvers to have solved any instances of the 32-bit parity problem without special-purpose equivalency reasoning <ref type="bibr" target="#b151">[AS05]</ref>.</p><p>Table <ref type="table" target="#tab_4">1</ref> shows the results of running the lookahead-based solver Dew Satz and the CDCL-based solver MINISAT on the ten par32 instances, with and without preprocessing. As preprocessors we used 3-Resolution, HyPre, NiVER and SatELite alone and followed by 3-Resolution for the last three. We eliminated 2-SIMPLIFY from this test as it aborted the resolution process of the first five par32* instances presented in Table <ref type="table" target="#tab_4">1</ref>. We also experimented with all combinations of two preprocessors for the problems par32-1 and par32-4. Where lines are omitted from the table (e.g. 
there is no line for HyPre on par32-1 and for SatELite+3-Resolution on par32-2), this is because no single solver produced a solution for those simplified instances.</p><p>It is evident from the table that these problems are seriously hard for both solvers. Even with preprocessing, MINISAT times out on all of them except for par32-2 and par32-5-c. Curiously, on the par32-2 instance, preprocessing with 3-Resolution makes its performance degrade a little. This is not a uniform effect: Table <ref type="table" target="#tab_15">4</ref> below shows examples in which MINISAT benefits markedly from 3-Resolution. Without preprocessing, Dew Satz times out on nine of the ten par32 instances, but in every case except par32-5 and par32-5-c 3-Resolution suffices to help it find a solution, and running multiple preprocessors improves its performance.</p><p>In general, Table <ref type="table" target="#tab_4">1</ref> shows that multiple preprocessing contributes significantly to enhancing the performance of Dew Satz, and the preprocessor 3-Resolution dominates the contribution through either single or multiple preprocessing.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="5.2">A Planning Benchmark Problem</head><p>The ferry planning benchmark problems, taken from the SAT2005 competition, are all easy for MINISAT, which solves all of them in about one second without needing preprocessors. Dew Satz, however, is challenged by them. The problems are satisfiable. We show the Dew Satz and MINISAT results on the problems in Table <ref type="table" target="#tab_5">2</ref>. Clearly the original problems contain some structure that CDCL is able to exploit but which is not uncovered by one-step lookahead. It is therefore interesting to see which kinds of reasoning carried out in a preprocessing phase are able to make that same structure available to Dew Satz. Most strikingly, reasoning with binary clauses in the manner of the 2-SIMPLIFY preprocessor reduces runtimes by upwards of four orders of magnitude in some cases. HyPre, NiVER and SatELite, especially HyPre, are also effective on these planning problems. In most cases the number of backtracks reduces from millions to fewer than 100, or even to zero for the ferry8 v01a, ferry9 v01a, and ferry10 ks99a instances, which means that the input formula is solved at the root node of the search tree.   </p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="5.3">Bounded Model Checking Problems</head><p>Another domain providing benchmark problem sets which appear to be easy for MINISAT but sometimes hard for Dew Satz is bounded model checking. In Table <ref type="table" target="#tab_6">3</ref> we report results on five of eleven BMC-IBM problems, two BMC-galileo problems and two of four BMC-alpha problems. All other benchmark problems in the BMC-IBM class are easy for both solvers and so are omitted from the table. The other two BMC-alpha instances are harder than the two reported even for MINISAT before and after preprocessing. The problems presented in Table <ref type="table" target="#tab_6">3</ref> are satisfiable.</p><p>Each of these bounded model checking problems is brought within the range of Dew Satz by some form of preprocessing. In general, HyPre and 3-Resolution are the best for this purpose, especially when used together, though on problem BMC-IBM-13 they are ineffective without the additional use of NiVER. The column showing the number of times Dew Satz backtracks is worthy of note. In many cases, preprocessing reduces the problem to one that can be solved without backtracking. Solving "without backtracking" has to be interpreted with care here, of course, since a nontrivial amount of lookahead may be required in a "backtrack-free" search. The results for BMC-galileo-9 furnish a good example of this: HyPre takes 407 seconds to refine the problem, following which Dew Satz spends 90 seconds on lookahead reasoning while constructing the first (heuristic) branch of its search tree, but then that branch leads directly to a solution. Adding 3-Resolution to the preprocessing step does not change the number of variables, and only slightly reduces the number of clauses, but it roughly halves the time subsequently spent on lookahead.</p><p>The instance BMC-alpha-4408 is hard for Dew Satz even after preprocessing. 
MINISAT with multiple preprocessing, however, solves the problem instance an order of magnitude faster. We can also observe that HyPre brings more benefit than SatELite. Table <ref type="table" target="#tab_15">4</ref> shows results for both solvers on a related problem set consisting of formal verification problems taken from the SAT2005 competition. The IBM-FV-01 problems are satisfiable except for the problem IBM-FV-01-k10; the IBM-FV-26 problems are unsatisfiable. Most of these satisfiable problems are easy for MINISAT, but the unsatisfiable cases show that the SatELite preprocessor (with which MINISAT was paired in the competition) is by far the least effective of the four we consider for MINISAT on these problems. The preprocessor HyPre proved the unsatisfiability of IBM-FV-01-k10 in 1.27 seconds. 2-SIMPLIFY was not used to simplify the IBM-FV-26 problems, because it is limited to input formulas with at most 100,000 variables. Again there are cases in which Dew Satz is improved from a 15,000 second timeout to a one-branch proof of unsatisfiability. Note that the numbers of clauses in these cases are actually increased by the preprocessor 3-Resolution, confirming that the point of such reasoning is to expose structure rather than to reduce problem size.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="5.4">A Highly Symmetrical Problem</head><p>The FPGA routing problem is a highly symmetrical problem that models the routing of wires in the channels of field-programmable integrated circuits <ref type="bibr" target="#b148">[AMS03]</ref>. The problem instances used in the experiment, which were artificially designed by Fadi Aloul, are taken from the SAT2002 competition.</p><p>Without preprocessing to break symmetries, many of the FPGA routing problems are hard — harder for CDCL solvers than for lookahead-based ones. Not only do they have many symmetries, but the clause graphs are also disconnected. Lookahead techniques with a neighbourhood variables ordering heuristic seem able to choose inferences within one graph component before moving to another, whereas MINISAT jumps frequently between components. Table <ref type="table" target="#tab_27">5</ref> shows the performance of both solvers on the FPGA routing problem set. Of 21 selected satisfiable (bart) problems, MINISAT solves 8 in some 2 hours. It manages better with the unsatisfiable (homer) instances, solving 14 of 15 in a total time of around 6 hours. Dew Satz solves all of the bart problems in 17.5 seconds and the homer ones in 45 minutes.</p><p>The detailed results for two of the satisfiable problems and two unsatisfiable ones (Table <ref type="table" target="#tab_29">6</ref>) are interesting. The resolution-based preprocessors do not modify the size of the input formula except when using SatELite. The Shatter preprocessor, which removes certain symmetries, is tried on its own and in combination with the five resolution-based preprocessors. 
It should be noted that the addition of symmetry-breaking clauses increases the sizes of the problems, but of course it greatly reduces the search spaces in most cases.</p><p>The performance of Dew Satz after preprocessing is often worse in terms of time than it was before, though there is always a decrease in the size of its search tree. This is because of the increase in the problem size, which increases the amount of lookahead processing. MINISAT, by contrast, sometimes speeds up by several orders of magnitude after preprocessing.     for automatically choosing the order in which to apply successive preprocessors. Machine learning may help here, though it would be better, or at least more insightful, to be able to base decisions on a decent theory about the interaction of reasoning methods.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="5.5">Order of Preprocessors</head><p>3. Another interesting project is to combine preprocessors not as a series of separate modules but as a single reasoner. For example, it would be possible to saturate under 3-Resolution and hyper-resolution together, in the manner found in resolution-based theorem provers. Whether this would be cost-effective in terms of time, and whether the results would differ in any worthwhile way from those obtained by ordering separate preprocessors, are unknown at this stage.</p><p>As SAT solvers are increasingly applied to real-world problems, we expect deductive reasoning by preprocessors to become increasingly important to them.</p></div><figure xmlns="http://www.tei-c.org/ns/1.0" xml:id="fig_0"><head></head><label></label><figDesc>λa.λb.(b = ((λc.(cb))a)) translates to λλ.(x 0 = ((λ.(x 0 x 1 ))x 1 ))</figDesc></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" xml:id="fig_3"><head>=</head><label></label><figDesc>if ¬consp(C) then * NIL * else list(if, car(C), * T * , disjoin(cdr(C))) conjoin(LC ) = if ¬consp(LC ) then * T * else list(if, disjoin(car(LC )), conjoin(cdr(LC )), * NIL * )</figDesc></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" xml:id="fig_4"><head>Figure 1 :</head><label>1</label><figDesc>Figure 1: Axioms to support clause processors in GZ. Here *T* and *NIL* are assumed to be the internal representation of T and NIL respectively. The predicate consp is defined in GZ such that consp(x) returns T if x is an ordered pair, and NIL otherwise.</figDesc></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" xml:id="fig_5"><head>Figure 2 :</head><label>2</label><figDesc>Figure 2: Correctness condition for clause processors.Here ev is assumed to be an evaluator for if, and args represents the remaining arguments of tool0 (in addition to clause C). The predicates term-listp and alistp are axiomatized in GZ such that (i) term-listp(x) returns a Boolean, which is T if and only if x is an object in the ACL2 universe representing a well-formed list of terms (and hence a clause), and (ii) alistp(a) returns a Boolean, which is T if and only if a is a well-formed association list.</figDesc></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" xml:id="fig_6"><head>Figure 1 :</head><label>1</label><figDesc>Figure 1: Isabelle -SAT System Architecture</figDesc></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" xml:id="fig_7"><head></head><label></label><figDesc>5 -rule applies to S, then let H be a C 5 -swff. If Tab((S \ {H}) c ∪ R 1 (H)) returns a proof π, then Tab returns the proof S π Rule(H), otherwise Tab returns NULL; 6. If a C 6 -rule applies to S, then let H be a C 6 -swff. Let π 1 = Tab((S \ {H}) c ∪ R 1 (H)). If π 1 is NULL, then Tab returns NULL. If there exists a permutation τ such that (S \ {H}) c ∪R 1 (H) = τ ((S\{H}) c ∪R 2 (H)), then Tab returns the proof S π 1 | τ −1 (π 1 )Rule(H).</figDesc></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" xml:id="fig_8"><head></head><label></label><figDesc>[Z] I = {[Zθ] M | θ is an allowed grounding substitution for Z}.</figDesc></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" xml:id="fig_9"><head>Figure 1 :</head><label>1</label><figDesc>Figure 1: The interpretations of the abstract states (a) Z1 = on(X 1 , a) • on(a, table), (b) Z 2 = on(X 2 , a) • on(a, table) • Y 2 , (c) Z 3 = on(X 3 , a) • on(a, table) • clear(X 3 ) and (d) Z 4 = on(X 4 , a) • on(a, table) • clear(X 4 ) • Y 4, where a is an object denoting a block, table is an object denoting a table, X 1 , X 2 , X 3 and X 4 are variables of sort object, Y 2 and Y 4 are variables of sort fluent term, on(X i , a), i = 1 . . . 4, is a fluent denoting that some block X i is on a and clear(X i ), i = 3, 4, is a fluent denoting that block X i is clear.</figDesc></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" xml:id="fig_10"><head>Figure 2 :</head><label>2</label><figDesc>Figure 2: LIFT-UP algorithm.</figDesc></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" xml:id="fig_11"><head>Figure 3 :</head><label>3</label><figDesc>Figure 3: Policy Expansion.</figDesc></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" xml:id="fig_12"><head>Input:</head><label></label><figDesc>Two fluent terms Z1, Z2. Output: A complete set of substitutitons θ such that Z1 AC1 θ Z2. 1. Deterministically match as many fluents of Z1 as possible to fluents of Z2. Substitute Z1 with the substitution found. If some fluent of Z1 does not match any fluent of Z2, decide Z1 AC1 θ Z2. 2. ObjCon-based deterministically match as many fluents of Z1 as possible to fluents of Z2. Substitute Z1 with the substitution found. If some fluent of Z1 does not match any fluent of Z2, decide Z1 AC1 θ Z2. 3. Build the substitution graph (V, E) for Z1 and Z2 with nodes v = (µ, i) ∈ V , where µ is a matching candidate for Z1 and Z2, i.e., matches some fluent at position i in Z1 to some fluent in Z2 and i ≥ 1 is referred to as a layer of v. Two nodes (µ1, i1) and (µ2, i2) are connected with an edge iff µ1µ2 = µ2µ1 and i1 = i2. Delete all nodes (µ, i) with Xµ = o, for some X ∈ Vars (Z1) and o ∈ Obj (Z2), and ObjCon(X, Z1, d) ObjCon(o, Z2, d) for some d. Find all cliques of size |Z1| in (V, E).</figDesc></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" xml:id="fig_13"><head>Figure 4 :</head><label>4</label><figDesc>Figure 4: Comparison timing results for FluCaP and AllTheta. The results present the average time needed for one subsumption test. Please note that the plots for Pipesworld are shown in logscale. Therefore small differences in the plot may indicate a substantial difference on runtimes.</figDesc></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" xml:id="fig_14"><head></head><label></label><figDesc>k-Resolution is just saturation under resolution with the restriction that the parent clauses are of length at most k. The special cases of 2-Resolution and 3-Resolution are of most interest. 3-Resolution has been used in a number of SAT solvers, notably Satz<ref type="bibr" target="#b163">[LA97]</ref> and the SLS solver R+AdaptNovelty + [APSS05] which won the satisfiable random problem category in the SAT2005 competition. Since it is the preprocessor already used by Satz, we expect it to work well with Dew Satz.</figDesc></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" type="table" xml:id="tab_0"><head></head><label></label><figDesc>and M. Rusinowitch, editors, Proc. of the 2nd IJCAR, Cork, Ireland, volume 3097 of LNAI, pages 223-228. Springer, 2004. [WBH + 02] Christoph Weidenbach, Uwe Brahm, Thomas Hillenbrand, Enno Keen, Christian Theobald, and Dalibor Topić. Spass version 2.0. In A. Voronkov, editor, Proc. of the 18th CADE, Copenhagen, volume 2392 of LNAI, pages 275-279. Springer, 2002.</figDesc><table /></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" type="table" xml:id="tab_1"><head></head><label></label><figDesc>relativises this impression. Our experiments indicate furthermore an increase of the sharing rate by operations like</figDesc><table><row><cell>Number of indexed terms</cell><cell>977</cell></row><row><cell>Number of created term nodes</cell><cell>11618</cell></row><row><cell>Average term size</cell><cell>54</cell></row><row><cell>Number of nodes with no parent nodes</cell><cell>904</cell></row><row><cell>Number of nodes with one parent node</cell><cell>9633</cell></row><row><cell>Number of nodes with two more more parent nodes</cell><cell>1083</cell></row><row><cell>Maximum number of parent nodes</cell><cell>2778 (symbol ∀)</cell></row><row><cell>Average number of parent nodes</cell><cell>1.68</cell></row><row><cell>Average number of terms a node occurs in</cell><cell>33.5</cell></row><row><cell>-"-(for symbols)</cell><cell>493.9</cell></row><row><cell>-"-(for nonprimitive term nodes)</cell><cell>24</cell></row><row><cell>Average PST/term size for symbol occurrences</cell><cell>0.21</cell></row><row><cell>Average PST/term size for bound variable occurrences</cell><cell>0.33</cell></row><row><cell>Average PST/term size for all term nodes</cell><cell>0.12</cell></row><row><cell>Figure 1: Structure of the Landau sample.</cell><cell></cell></row></table></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" type="table" xml:id="tab_2"><head></head><label></label><figDesc>, Matthews, and Tuttle integrate ACL2 with SMV [RMT03]. Reeber and Hunt connect ACL2 with the Zchaff satisfiability solver [RH06], and Sawada and Reeber provide a connection with SixthSense [SR06]. Manolios and Srinivasan connect ACL2 with UCLID [MS04, MS05].</figDesc><table /></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" type="table" xml:id="tab_3"><head></head><label></label><figDesc>(with implication associating to the right). In [FMM + 06], each clause p 1 ∨ . . . ∨ p n is encoded as an implication p 1 =⇒ . . . =⇒ p n =⇒ False (where p i denotes the negation normal form of ¬p i , for 1 ≤ i ≤ n), and turned into a separate theorem {p 1 ∨ . . . ∨ p n } [[p 1 ; . . . ; p n ]] =⇒ False.</figDesc><table><row><cell cols="2">This allows resolution to operate on comparatively small objects, and resolving two</cell></row><row><cell>clauses Γ</cell><cell>[[p</cell></row></table><note>ψ impE Let us use [[A 1 ; . . . ; A n ]] =⇒ B as a short hand for A 1 =⇒ . . . =⇒ A n =⇒ B 1 ; . . . ; p n ]] =⇒ False and Γ [[q 1 ; . . . ; q m</note></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" type="table" xml:id="tab_4"><head>Table 1 :</head><label>1</label><figDesc>Runtimes (in seconds) for MSC007-1.008</figDesc><table><row><cell>found by MiniSat</cell></row><row><cell>Problem Representation Proof Reconstruction (zChaff) Naive HOL 726.5 Separate Clauses 7.8 Sequent 1.2 CNF Sequent 0.5</cell></row></table></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" type="table" xml:id="tab_5"><head>Table 2 :</head><label>2</label><figDesc>Runtimes (in seconds) for SATLIB problems, zChaff</figDesc><table><row><cell>Problem</cell><cell cols="2">Variables Clauses</cell><cell cols="4">zChaff (s) Proof (s) Resolutions Total (s)</cell></row><row><cell>c7552mul.miter</cell><cell>11282</cell><cell>69529</cell><cell>73</cell><cell>70</cell><cell>252200</cell><cell>145</cell></row><row><cell>6pipe</cell><cell>15800</cell><cell>394739</cell><cell>167</cell><cell>321</cell><cell>268808</cell><cell>512</cell></row><row><cell>6pipe 6 ooo</cell><cell>17064</cell><cell>545612</cell><cell>308</cell><cell>2575</cell><cell>870345</cell><cell>3179</cell></row><row><cell>7pipe</cell><cell>23910</cell><cell>751118</cell><cell>495</cell><cell>1132</cell><cell>357136</cell><cell>1768</cell></row><row><cell>Problem</cell><cell cols="2">Variables Clauses</cell><cell cols="4">MiniSat (s) Proof (s) Resolutions Total (s)</cell></row><row><cell>c7552mul.miter</cell><cell>11282</cell><cell>69529</cell><cell>25</cell><cell>49</cell><cell>908231</cell><cell>106</cell></row><row><cell>6pipe</cell><cell>15800</cell><cell>394739</cell><cell>x</cell><cell>-</cell><cell>-</cell><cell>-</cell></row><row><cell>6pipe 6 ooo</cell><cell>17064</cell><cell>545612</cell><cell>x</cell><cell>-</cell><cell>-</cell><cell>-</cell></row><row><cell>7pipe</cell><cell>23910</cell><cell>751118</cell><cell>x</cell><cell>-</cell><cell>-</cell><cell>-</cell></row></table></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" type="table" xml:id="tab_6"><head>Table 3 :</head><label>3</label><figDesc>Runtimes (in seconds) for SATLIB problems, MiniSat DIMACS files, and we work entirely at the system's ML level, avoiding the usual user interface, to prove unsatisfiability.</figDesc><table /></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" type="table" xml:id="tab_8"><head></head><label></label><figDesc>T ab 1 and T ab 2 are closed proof tables for S 1 and S 2</figDesc><table><row><cell>respectively, then</cell><cell>S T ab 1</cell><cell>Rule(H) or</cell><cell>S T ab 1 | T ab 2</cell><cell>Rule(H</cell></row></table><note>) denote the closed proof table for S defined in the obvious way. Moreover, R i (H) (i = 1, 2) denotes the set containing the swffs of S i which replaces H. For instance: R 1</note></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" type="table" xml:id="tab_9"><head></head><label></label><figDesc>then Tab returns the proof S π Rule(H), otherwise Tab returns NULL (r1); 3. If a C 2 -rule applies to S, then if there exists a C 2 -swff H such that H is a cle-swff and δ S H, then Tab returns (the proof) S. Otherwise, let H be a cle-swff such that σ δ S H, if there is any, otherwise let</figDesc><table /></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" type="table" xml:id="tab_10"><head></head><label></label><figDesc>If the previous points do not apply, then Tab carries out the following Points 4.1 and 4.2: 4.1 Let {H 1 , . . . H n } be all the C 3 -swffs in S. For i = 1, . . . , n, the following instructions are iterated: if there is no swff H j (j ∈ {1, . . . , i − 1}) and a permutation τ such that (S \{H j</figDesc><table><row><cell>NULL;</cell><cell>Tab returns the proof</cell><cell>S π 1 | π 2</cell><cell>Rule(H), otherwise (π 2 is NULL) Tab returns</cell></row></table><note>4. If a C 3 or C 4 -rule applies to S, then Tab proceeds as follows: if σ δ S £ S, then Tab returns NULL (r2). If for every p ∈ PV(S), Tp ∈ S or F c p ∈ S, then Tab returns S.</note></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" type="table" xml:id="tab_11"><head></head><label></label><figDesc>Tab returns the proof S π Let {H 1 , . . . H n } be all the C 4 -swffs in S. For i = 1, . . . , n, the following Points (4.2.1) and (4.2.2) are iterated. (4.2.1) If there is neither swff H j (j ∈ {1, . . . , i − 1}) nor a permutation τ such that</figDesc><table><row><cell>Rule(H i );</cell></row><row><cell>4.2</cell></row></table></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" type="table" xml:id="tab_12"><head></head><label></label><figDesc>Thus by Point (a) applied to H , there exists a Kripke model K H = P H , ≤ H , ρ H , H such that ρ H £(S \{H }) c ∪R 1 (H ). By using τ we can translate K H into a model K H = P H , ≤ H , ρ H , H , where P H = P H , ≤ H =≤ H , ρ H = ρ H and for every world α ∈ P H , if p ∈ PV(S), then α H τ (p) iff α H p. By definition of K H , it follows K H £ (S \ {H}) c ∪ R 1 (H)); (ii) for every H ∈ S ∩ C 4 , we have two cases: (iia)Tab((S \ {H}) c ∪ R 1 (H)) = NULL, thus by induction hypothesis there exists a Kripke model K H</figDesc><table /></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" type="table" xml:id="tab_13"><head></head><label></label><figDesc>then τ is used. Otherwise PITP considers that no permutation exists and solves (S \ {H }) c ∪ R 1 (H ). Analogously for C 4 -swffs. Since our search is narrow, many permutations are disregarded. This problem is made worse by the fact that conjunctions and disjunctions are not implemented as lists of formulas. Thus at present this optimization applied only to two families of formulas of ILTP v1.1.1 library. Finally PITP does not implement the search for a permutation in Points 3 and 6 of Tab. Despite the fact that some optimizations are</figDesc><table><row><cell></cell><cell></cell><cell></cell><cell cols="7">ft Prolog ft C LJT STRIP PITP</cell></row><row><cell></cell><cell></cell><cell>solved</cell><cell>188</cell><cell cols="2">199 175</cell><cell cols="2">202</cell><cell>215</cell></row><row><cell></cell><cell></cell><cell>(%)</cell><cell>68.6</cell><cell cols="2">72.6 63.9</cell><cell cols="2">73.7</cell><cell>78.5</cell></row><row><cell></cell><cell></cell><cell>proved</cell><cell>104</cell><cell cols="2">106 108</cell><cell cols="2">119</cell><cell>128</cell></row><row><cell></cell><cell></cell><cell>refuted</cell><cell>84</cell><cell>93</cell><cell>67</cell><cell cols="2">83</cell><cell>87</cell></row><row><cell></cell><cell></cell><cell>solved after:</cell><cell></cell><cell></cell><cell></cell><cell></cell><cell></cell><cell></cell></row><row><cell></cell><cell></cell><cell>0-1s</cell><cell>173</cell><cell cols="2">185 166</cell><cell cols="2">178</cell><cell>190</cell></row><row><cell></cell><cell></cell><cell>1-10s</cell><cell>5</cell><cell>6</cell><cell>4</cell><cell cols="2">11</cell><cell>10</cell></row><row><cell></cell><cell></cell><cell>10-100s</cell><cell>6</cell><cell>7</cell><cell>2</cell><cell 
cols="2">11</cell><cell>9</cell></row><row><cell></cell><cell></cell><cell>100-600s</cell><cell>4</cell><cell>1</cell><cell>3</cell><cell>2</cell><cell></cell><cell>6</cell></row><row><cell></cell><cell></cell><cell>(&gt;600s)</cell><cell>86</cell><cell>75</cell><cell>47</cell><cell cols="2">43</cell><cell>58</cell></row><row><cell></cell><cell></cell><cell>errors</cell><cell>0</cell><cell>0</cell><cell>52</cell><cell cols="2">29</cell><cell>1</cell></row><row><cell cols="10">Table 2: ft Prolog, ft C, LJT, STRIP and PITP on ILTP v1.1.1 formulas</cell></row><row><cell></cell><cell>SYJ202+1</cell><cell>SYJ205+1</cell><cell>SYJ206+1</cell><cell>SYJ207+1</cell><cell cols="2">SYJ208+1</cell><cell cols="2">SYJ209+1</cell><cell>SYJ211+1</cell><cell>SYJ212+1</cell></row><row><cell></cell><cell>provable</cell><cell>provable</cell><cell>provable</cell><cell>refutable</cell><cell>refutable</cell><cell></cell><cell cols="2">refutable</cell><cell>refutable</cell><cell>refutable</cell></row><row><cell>ft Prolog</cell><cell>07 (516.55)</cell><cell>08 (60.26)</cell><cell>10 (144.5)</cell><cell>07 (358.05)</cell><cell cols="2">08 (65.41)</cell><cell cols="2">10 (543.09)</cell><cell>04 (66.62)</cell><cell>20 (0.01)</cell></row><row><cell>ft C</cell><cell>07 (76.3)</cell><cell>09 (85.84)</cell><cell>11 (481.98)</cell><cell>07 (51.13)</cell><cell cols="2">17 (81.41)</cell><cell cols="2">10 (96.99)</cell><cell>04 (17.25)</cell><cell>20 (0.01)</cell></row><row><cell>LJT</cell><cell>02 (0.09)</cell><cell>20 (0.01)</cell><cell>05 (0.01)</cell><cell>03 (2.64)</cell><cell>08 (0.18)</cell><cell></cell><cell cols="2">10 (461.27)</cell><cell>08 (546.46)</cell><cell>07 (204.98)</cell></row><row><cell>STRIP</cell><cell>06 (11.28)</cell><cell>14 (267.39)</cell><cell>20 (37.64)</cell><cell>04 (9.3)</cell><cell>06 (0.24)</cell><cell></cell><cell cols="2">10 (132.55)</cell><cell>09 (97.63)</cell><cell>20 (36.79)</cell></row><row><cell>PITP</cell><cell>09 (595.79)</cell><cell>20 (0.01)</cell><cell>20 
(4.07)</cell><cell>04 (11.11)</cell><cell cols="2">08 (83.66)</cell><cell cols="2">10 (280.47)</cell><cell>20 (526.16)</cell><cell>11 (528.08)</cell></row></table></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" type="table" xml:id="tab_14"><head>Table 3</head><label>3</label><figDesc></figDesc><table><row><cell>: ILTP v1.1.1 formulas solved by classes</cell></row><row><cell>missing, results in Table 2 (this table is taken from http://www.iltp.de/download/-</cell></row><row><cell>ILTP-v1.1.1-prop-comparison.txt) shows that PITP outperforms the known theo-</cell></row></table><note>rem provers on ILTP v1.1.1 library. Within 10 minutes PITP decides 215 out of 274 formulas of ILTP v1.1.1 The library divides the formulas in several families. Every family contains formulas sharing the same pattern of increasing complexity. In Table3</note></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" type="table" xml:id="tab_15"><head>Table 4 :</head><label>4</label><figDesc>Comparison between PITP optimizations (this table is taken from http://www.iltp.de/) for every family (some families of ILTP v1.1.1 are missing because they are decided within 1s by all provers) we report the index of the largest formula which every prover is able to decide within 600s CPU time and in parenthesis the CPU time necessary to solve such a formula.</figDesc><table /></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" type="table" xml:id="tab_17"><head>Table 1 :</head><label>1</label><figDesc>Representative timing results for the first ten iterations of the first-order value iteration with the normalization procedure switched on or off.</figDesc><table><row><cell>N</cell><cell cols="2">Number of states</cell><cell cols="2">Time, msec</cell><cell>Runtime, msec</cell><cell>Runtime w/o norm, msec</cell></row><row><cell>0</cell><cell>Supdate 9</cell><cell>Snorm 6</cell><cell cols="2">Update Norm 144 1</cell><cell>145</cell><cell>144</cell></row><row><cell>1</cell><cell>24</cell><cell>14</cell><cell>393</cell><cell>3</cell><cell>396</cell><cell>593</cell></row><row><cell>2</cell><cell>94</cell><cell>23</cell><cell>884</cell><cell>12</cell><cell>896</cell><cell>2219</cell></row><row><cell>3</cell><cell>129</cell><cell>33</cell><cell>1377</cell><cell>16</cell><cell>1393</cell><cell>13293</cell></row><row><cell>4</cell><cell>328</cell><cell>39</cell><cell>2079</cell><cell>46</cell><cell>2125</cell><cell>77514</cell></row><row><cell>5</cell><cell>361</cell><cell>48</cell><cell>2519</cell><cell>51</cell><cell>2570</cell><cell>805753</cell></row><row><cell>6</cell><cell>604</cell><cell>52</cell><cell>3268</cell><cell>107</cell><cell>3375</cell><cell>n/a</cell></row><row><cell>7</cell><cell>627</cell><cell>54</cell><cell>3534</cell><cell>110</cell><cell>3644</cell><cell>n/a</cell></row><row><cell>8</cell><cell>795</cell><cell>56</cell><cell>3873</cell><cell>157</cell><cell>4030</cell><cell>n/a</cell></row><row><cell>9</cell><cell>811</cell><cell>59</cell><cell>4131</cell><cell>154</cell><cell>4285</cell><cell>n/a</cell></row></table></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" type="table" xml:id="tab_18"><head>Table 2 :</head><label>2</label><figDesc>Performance comparison of FluCaP (denoted as FluCaP) and symbolic LAO * (denoted as LAO*), where the cells n/a denote the fact that a planner did not deliver a solution within the time limit of one hour. NAS and NGS are number of abstract and ground states, respectively.</figDesc><table><row><cell cols="2">Problem B C</cell><cell>Total av. reward LAO* FluCaP FOVI FluCaP -</cell><cell>LAO*</cell><cell>Total time, sec. FluCaP FOVI</cell><cell>FluCaP -</cell><cell cols="2">H.time, sec. LAO* FluCaP</cell><cell>LAO*</cell><cell>NAS FluCaP</cell><cell>FOVI</cell><cell>NGS, ×10 3 % LAO* FluCaP FluCaP</cell></row><row><cell></cell><cell>4</cell><cell cols="3">494 494 494 494 22.3 22.0 23.4</cell><cell>31.1</cell><cell>8.7</cell><cell>4.2</cell><cell>35</cell><cell>410</cell><cell>1077 0.86 0.82</cell><cell>2.7</cell></row><row><cell>5</cell><cell>3</cell><cell cols="3">496 495 495 496 23.1 17.8 22.7</cell><cell>25.1</cell><cell>9.5</cell><cell>1.3</cell><cell>34</cell><cell>172</cell><cell>687</cell><cell>0.86 0.68 2.1</cell></row><row><cell></cell><cell>2</cell><cell cols="3">496 495 495 495 27.3 11.7 15.7</cell><cell>16.5</cell><cell>12.7</cell><cell>0.3</cell><cell>32</cell><cell>55</cell><cell>278</cell><cell>0.86 0.66</cell><cell>1.9</cell></row><row><cell></cell><cell>4</cell><cell cols="4">493 493 493 493 137.6 78.5 261.6 285.4</cell><cell cols="4">76.7 21.0 68 1061</cell><cell>3847 7.05 4.24</cell><cell>3.1</cell></row><row><cell>6</cell><cell>3</cell><cell cols="5">493 492 493 492 150.5 33.0 119.1 128.5 85.0</cell><cell>9.3</cell><cell>82</cell><cell>539</cell><cell>1738 7.05 6.50</cell><cell>2.3</cell></row><row><cell></cell><cell>2</cell><cell cols="3">495 494 495 496 221.3 16.6 56.4</cell><cell>63.3</cell><cell>135.0</cell><cell>1.2</cell><cell>46</cell><cell>130</cell><cell>902</cell><cell>7.05 6.24 
2.0</cell></row><row><cell></cell><cell>4</cell><cell cols="3">492 491 491 491 1644 198.1 2776</cell><cell>n/a</cell><cell cols="4">757.0 171.3 143 2953</cell><cell>12014 65.9 23.6</cell><cell>3.5</cell></row><row><cell>7</cell><cell>3</cell><cell cols="4">494 494 494 494 1265 161.6 1809 2813</cell><cell cols="5">718.3 143.6 112 2133 7591 65.9 51.2</cell><cell>2.4</cell></row><row><cell></cell><cell>2</cell><cell cols="8">494 494 494 494 2210 27.3 317.7 443.6 1241 12.3 101 425</cell><cell>2109 65.9 61.2</cell><cell>2.0</cell></row><row><cell></cell><cell>4</cell><cell cols="3">n/a 490 n/a n/a n/a 1212 n/a</cell><cell>n/a</cell><cell cols="4">n/a 804.1 n/a 8328</cell><cell>n/a n/a 66.6</cell><cell>4.1</cell></row><row><cell>8</cell><cell>3</cell><cell cols="3">n/a 490 n/a n/a n/a 598.5 n/a</cell><cell>n/a</cell><cell cols="4">n/a 301.2 n/a 3956</cell><cell>n/a n/a 379.7 3.0</cell></row><row><cell></cell><cell>2</cell><cell cols="3">n/a 492 n/a n/a n/a 215.3 1908</cell><cell>n/a</cell><cell cols="4">n/a 153.2 n/a 2019</cell><cell>7251 n/a 1121</cell><cell>2.3</cell></row><row><cell cols="2">15 3 17 4</cell><cell cols="3">n/a 486 n/a n/a n/a 1809 n/a n/a 481 n/a n/a n/a 3548 n/a</cell><cell>n/a n/a</cell><cell cols="4">n/a 1733 n/a 7276 n/a 1751 n/a 15225</cell><cell>n/a n/a 1.2 • 10 7 5.7 n/a n/a 2.5 • 10 7 6.1</cell></row></table></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" type="table" xml:id="tab_20"><head>Table 3 :</head><label>3</label><figDesc>Performance of FluCaP on larger instances of one-color Blocksworld problems,</figDesc><table /></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" type="table" xml:id="tab_21"><head></head><label></label><figDesc>t 1 , . . . , t n ) and o 1 = t π 1 and o 2 = t π 2 } with o 1 , o 2 ∈ Obj (Z), f (t 1 , . . . , t n ) being a fluent and π 1 , π 2 being positions of objects o 1 , o 2 in f . The labeling function (o) = {f |Z contains f (o)} associates each object o with a unary fluent name f this object belongs to. The object occurrence graph for the state Z from our running example will contain three vertices X, Y and table with labels {r, h, w}, {b, h, l} and {}, resp., and two edges (X, 1, on, 2, Y ) and (Y, 1, on, 2, table). The object context ObjCon(o, Z, d) of depth d &gt; 0 is defined for each object o of a state Z as a chain of labels: (o)</figDesc><table /></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" type="table" xml:id="tab_22"><head>Table 1 :</head><label>1</label><figDesc>solved the ten par32* instances by enhancing Satz's search process with equivalency reasoning. Ostrowski et Dew Satz and MINISAT performance, before and after preprocessing, on par32 problem.</figDesc><table><row><cell>Instance</cell><cell>Prep.</cell><cell cols="2">#Vars/#Cls/#Lits Ptime</cell><cell cols="2">Dew Satz</cell><cell cols="2">MINISAT</cell></row><row><cell></cell><cell></cell><cell></cell><cell></cell><cell>Stime</cell><cell>#BackT</cell><cell>Stime</cell><cell>#Conflict</cell></row><row><cell>par32-1</cell><cell>Orig</cell><cell>3176/10227/27501</cell><cell>n/a</cell><cell>&gt;15,000</cell><cell>n/a</cell><cell>&gt;15,000</cell><cell>n/a</cell></row><row><cell></cell><cell>3Res</cell><cell>2418/7463/19750</cell><cell>0.08</cell><cell>12,873</cell><cell>17,335,530</cell><cell>&gt;15,000</cell><cell>n/a</cell></row><row><cell></cell><cell>Hyp+3Res</cell><cell>1313/6193/17203</cell><cell>0.50</cell><cell>9,513</cell><cell>17,391,333</cell><cell>&gt;15,000</cell><cell>n/a</cell></row><row><cell></cell><cell>Niv+3Res</cell><cell>1315/5948/16707</cell><cell>0.46</cell><cell>6,858</cell><cell>13,476,105</cell><cell>&gt;15,000</cell><cell>n/a</cell></row><row><cell></cell><cell>3Res+Hyp</cell><cell>1313/5495/15810</cell><cell>0.11</cell><cell>9,655</cell><cell>17,335,492</cell><cell>&gt;15,000</cell><cell>n/a</cell></row><row><cell></cell><cell>Sat+3Res</cell><cell>849/5245/18660</cell><cell>0.37</cell><cell>14,729</cell><cell>34,569,968</cell><cell>&gt;15,000</cell><cell>n/a</cell></row><row><cell>par32-2</cell><cell>Orig</cell><cell>3176/10253/27405</cell><cell>n/a</cell><cell>&gt;15,000</cell><cell>n/a</cell><cell>5,364</cell><cell>9,125,821</cell></row><row><cell></cell><cell>3Res</cell><cell>2392/7387/19550</cell><cell>0.08</cell><cell>5,171</cell><cell>9,341,185</cell><cell>6,205</cell><cell>10,492,612</cell></row><row><cell></cell><cel
l>Hyp+3Res</cell><cell>1301/5975/16719</cell><cell>0.36</cell><cell>3,831</cell><cell>8,186,883</cell><cell>&gt;15,000</cell><cell>n/a</cell></row><row><cell></cell><cell>Niv+3Res</cell><cell>1303/5730/16223</cell><cell>0.29</cell><cell>1,518</cell><cell>3,889,345</cell><cell>&gt;15,000</cell><cell>n/a</cell></row><row><cell>par32-3</cell><cell>Orig</cell><cell>3176/10297/27581</cell><cell>n/a</cell><cell>&gt;15,000</cell><cell>n/a</cell><cell>&gt;15,000</cell><cell>n/a</cell></row><row><cell></cell><cell>3Res</cell><cell>2395/7437/19738</cell><cell>0.07</cell><cell>6,124</cell><cell>9,711,576</cell><cell>&gt;15,000</cell><cell>n/a</cell></row><row><cell></cell><cell>Hyp+3Res</cell><cell>1323/5961/16779</cell><cell>0.23</cell><cell>3,673</cell><cell>9,708,520</cell><cell>&gt;15,000</cell><cell>n/a</cell></row><row><cell></cell><cell>Niv+3Res</cell><cell>1325/5716/16283</cell><cell>0.22</cell><cell>4,470</cell><cell>9,710,552</cell><cell>&gt;15,000</cell><cell>n/a</cell></row><row><cell></cell><cell>Sat+3Res</cell><cell>848/5284/18878</cell><cell>0.37</cell><cell>3,647</cell><cell>2,206,369</cell><cell>&gt;15,000</cell><cell>n/a</cell></row><row><cell>par32-4</cell><cell>Orig</cell><cell>3176/10313/27645</cell><cell>n/a</cell><cell>&gt;15,000</cell><cell>n/a</cell><cell>&gt;15,000</cell><cell>n/a</cell></row><row><cell></cell><cell>3Res</cell><cell>2385/7433/19762</cell><cell>0.08</cell><cell>10,425</cell><cell>10,036,154</cell><cell>&gt;15,000</cell><cell>n/a</cell></row><row><cell></cell><cell>Sat</cell><cell>849/5160/18581</cell><cell>0.21</cell><cell>12,820</cell><cell>18,230,746</cell><cell>&gt;15,000</cell><cell>n/a</cell></row><row><cell></cell><cell>Hyp+3Res</cell><cell>1331/6055/16999</cell><cell>0.36</cell><cell>9,001</cell><cell>17,712,997</cell><cell>&gt;15,000</cell><cell>n/a</cell></row><row><cell></cell><cell>3Res+Hyp</cell><cell>1331/5567/16026</cell><cell>0.11</cell><cell>5,741</cell><cell>10,036,146</cell><cell>&gt;15,000</cell><cell>n/a</cell></row
><row><cell></cell><cell>Niv+3Res</cell><cell>1333/5810/16503</cell><cell>0.34</cell><cell>6,099</cell><cell>10,036,154</cell><cell>&gt;15,000</cell><cell>n/a</cell></row><row><cell></cell><cell>3Res+Niv</cell><cell>1290/5297/15481</cell><cell>0.10</cell><cell>14,003</cell><cell>25,092,756</cell><cell>&gt;15,000</cell><cell>n/a</cell></row><row><cell></cell><cell>3Res+Sat</cell><cell>850/5286/18958</cell><cell>0.35</cell><cell>3,552</cell><cell>7,744,986</cell><cell>&gt;15,000</cell><cell>n/a</cell></row><row><cell></cell><cell>Sat+3Res</cell><cell>849/5333/19052</cell><cell>0.38</cell><cell>3,563</cell><cell>7,744,986</cell><cell>&gt;15,000</cell><cell>n/a</cell></row><row><cell></cell><cell>Sat+2Sim</cell><cell>848/5154/18565</cell><cell>0.26</cell><cell>12,862</cell><cell>18,230,746</cell><cell>&gt;15,000</cell><cell>n/a</cell></row><row><cell>par32-5</cell><cell>Orig</cell><cell>3176/10325/27693</cell><cell>n/a</cell><cell>&gt;15,000</cell><cell>n/a</cell><cell>&gt;15,000</cell><cell>n/a</cell></row><row><cell></cell><cell>Niv</cell><cell>1978/7864/22535</cell><cell>0.03</cell><cell>10,651</cell><cell>27,165,469</cell><cell>&gt;15,000</cell><cell>n/a</cell></row><row><cell>par32-1-c</cell><cell>Orig</cell><cell>1315/5254/15390</cell><cell>n/a</cell><cell>&gt;15,000</cell><cell>n/a</cell><cell>&gt;15,000</cell><cell>n/a</cell></row><row><cell></cell><cell>3Res</cell><cell>1315/5957/16738</cell><cell>0.35</cell><cell>11,068</cell><cell>25,920,943</cell><cell>&gt;15,000</cell><cell>n/a</cell></row><row><cell></cell><cell>Hyp+3Res</cell><cell>1313/6193/17203</cell><cell>0.48</cell><cell>7,419</cell><cell>8,931,149</cell><cell>&gt;15,000</cell><cell>n/a</cell></row><row><cell>par32-2-c</cell><cell>Orig</cell><cell>1303/5206/15246</cell><cell>n/a</cell><cell>&gt;15,000</cell><cell>n/a</cell><cell>&gt;15,000</cell><cell>n/a</cell></row><row><cell></cell><cell>3Res</cell><cell>1303/5739/16254</cell><cell>0.23</cell><cell>428</cell><cell>345,680</cell><cell>&gt;15,000</c
ell><cell>n/a</cell></row><row><cell></cell><cell>Hyp+3Res</cell><cell>1301/5975/16719</cell><cell>0.32</cell><cell>7,402</cell><cell>8,166,758</cell><cell>&gt;15,000</cell><cell>n/a</cell></row><row><cell>par32-3-c</cell><cell>Orig</cell><cell>1325/5294/15510</cell><cell>n/a</cell><cell>&gt;15,000</cell><cell>n/a</cell><cell>&gt;15,000</cell><cell>n/a</cell></row><row><cell></cell><cell>3Res</cell><cell>1325/5725/16314</cell><cell>0.15</cell><cell>4,482</cell><cell>9,462,205</cell><cell>&gt;15,000</cell><cell>n/a</cell></row><row><cell></cell><cell>Hyp</cell><cell>1323/5589/16094</cell><cell>0.04</cell><cell>11,745</cell><cell>19,947,965</cell><cell>&gt;15,000</cell><cell>n/a</cell></row><row><cell></cell><cell>Hyp+3Res</cell><cell>1323/5961/16779</cell><cell>0.22</cell><cell>4,375</cell><cell>9,462,245</cell><cell>&gt;15,000</cell><cell>n/a</cell></row><row><cell></cell><cell>Niv+3Res</cell><cell>1321/5708/16266</cell><cell>0.24</cell><cell>7,361</cell><cell>16,265,438</cell><cell>&gt;15,000</cell><cell>n/a</cell></row><row><cell></cell><cell>Sat+3Res</cell><cell>802/5335/19335</cell><cell>0.33</cell><cell>5,407</cell><cell>7,280,963</cell><cell>&gt;15,000</cell><cell>n/a</cell></row><row><cell>par32-4-c</cell><cell>Orig</cell><cell>1333/5326/15606</cell><cell>n/a</cell><cell>&gt;15,000</cell><cell>n/a</cell><cell>&gt;15,000</cell><cell>n/a</cell></row><row><cell></cell><cell>3Res</cell><cell>1333/5819/16534</cell><cell>0.24</cell><cell>7,097</cell><cell>8,440,212</cell><cell>&gt;15,000</cell><cell>n/a</cell></row><row><cell></cell><cell>Hyp+3Res</cell><cell>1331/6055/16999</cell><cell>0.32</cell><cell>5,175</cell><cell>9,669,012</cell><cell>&gt;15,000</cell><cell>n/a</cell></row><row><cell></cell><cell>Niv+3Res</cell><cell>1329/5802/16486</cell><cell>0.55</cell><cell>10,495</cell><cell>21,738,376</cell><cell>&gt;15,000</cell><cell>n/a</cell></row><row><cell></cell><cell>Sat+3Res</cell><cell>806/5357/19443</cell><cell>0.32</cell><cell>10,110</cell><cell>8,013,977<
/cell><cell>&gt;15,000</cell><cell>n/a</cell></row><row><cell>par32-5-c</cell><cell>Orig</cell><cell>1339/5350/15678</cell><cell>n/a</cell><cell>10,949</cell><cell>22,878,571</cell><cell>&gt;15,000</cell><cell>n/a</cell></row><row><cell></cell><cell>Niv+3Res</cell><cell>1335/5728/16362</cell><cell>0.28</cell><cell>&gt;15,000</cell><cell>n/a</cell><cell>7,363</cell><cell>16,189,524</cell></row></table></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" type="table" xml:id="tab_23"><head>Table 2 :</head><label>2</label><figDesc>Dew Satz and MINISAT performance, before and after preprocessing, on ferry planning problem.</figDesc><table><row><cell>Instance</cell><cell>Prep.</cell><cell cols="2">#Vars/#Cls/#Lits Ptime</cell><cell cols="2">Dew Satz</cell><cell cols="2">MINISAT</cell></row><row><cell></cell><cell></cell><cell></cell><cell></cell><cell>Stime</cell><cell>#BackT</cell><cell>Stime</cell><cell>#Conflict</cell></row><row><cell>ferry7 ks99i</cell><cell>Orig</cell><cell>1946/22336/45706</cell><cell>n/a</cell><cell>2,828</cell><cell>10,764,261</cell><cell>0.13</cell><cell>4,266</cell></row><row><cell></cell><cell>3Res</cell><cell>1930/22289/45621</cell><cell>0.09</cell><cell>&gt;15,000</cell><cell>n/a</cell><cell>0.11</cell><cell>3,707</cell></row><row><cell></cell><cell>Hyp</cell><cell>1881/32855/66732</cell><cell>0.21</cell><cell>1,672</cell><cell>1,204,321</cell><cell>0.03</cell><cell>417</cell></row><row><cell></cell><cell>Niv</cell><cell>1543/21904/45243</cell><cell>0.01</cell><cell>&gt;15,000</cell><cell>n/a</cell><cell>0.10</cell><cell>3,469</cell></row><row><cell></cell><cell>Sat</cell><cell>1286/21601/50644</cell><cell>0.33</cell><cell>&gt;15,000</cell><cell>n/a</cell><cell>0.07</cell><cell>2,763</cell></row><row><cell></cell><cell>Sat+2Sim</cell><cell>1279/56597/120318</cell><cell>0.49</cell><cell>0.41</cell><cell>28</cell><cell>0.05</cell><cell>1,096</cell></row><row><cell>ferry7 
v01i</cell><cell>Orig</cell><cell>1329/21688/50617</cell><cell>n/a</cell><cell>&gt;15,000</cell><cell>n/a</cell><cell>0.05</cell><cell>1,858</cell></row><row><cell></cell><cell>3Res</cell><cell>1329/21681/50505</cell><cell>0.14</cell><cell>&gt;15,000</cell><cell>n/a</cell><cell>0.05</cell><cell>1,858</cell></row><row><cell></cell><cell>Sat</cell><cell>1286/21609/50803</cell><cell>0.17</cell><cell>&gt;15,000</cell><cell>n/a</cell><cell>0.18</cell><cell>6,309</cell></row><row><cell></cell><cell>Sat+2Sim+3Res</cell><cell>1286/64472/136299</cell><cell>0.95</cell><cell>4.28</cell><cell>824</cell><cell>0.05</cell><cell>1,018</cell></row><row><cell></cell><cell>Sat+2Sim+Hyp+3Res</cell><cell>1272/62208/131357</cell><cell>1.26</cell><cell>3.30</cell><cell>580</cell><cell>0.10</cell><cell>2,398</cell></row><row><cell>ferry8 ks99a</cell><cell>Orig</cell><cell>1259/15259/31167</cell><cell>n/a</cell><cell>574</cell><cell>1,869,995</cell><cell>0.01</cell><cell>0</cell></row><row><cell></cell><cell>3Res</cell><cell>1241/15206/31071</cell><cell>0.08</cell><cell>654</cell><cell>2,074,794</cell><cell>0.01</cell><cell>0</cell></row><row><cell></cell><cell>Sat</cell><cell>813/14720/34687</cell><cell>0.24</cell><cell>810</cell><cell>1,040,528</cell><cell>0.01</cell><cell>381</cell></row><row><cell></cell><cell>Sat+2Sim</cell><cell>813/35008/75263</cell><cell>0.35</cell><cell>0.11</cell><cell>4</cell><cell>0.02</cell><cell>295</cell></row><row><cell>ferry8 
ks99i</cell><cell>Orig</cell><cell>2547/32525/66425</cell><cell>n/a</cell><cell>&gt;15,000</cell><cell>n/a</cell><cell>0.22</cell><cell>6,615</cell></row><row><cell></cell><cell>3Res</cell><cell>2529/32472/66329</cell><cell>0.12</cell><cell>&gt;15,000</cell><cell>n/a</cell><cell>0.14</cell><cell>3,495</cell></row><row><cell></cell><cell>Hyp</cell><cell>2473/48120/97601</cell><cell>0.32</cell><cell>&gt;15,000</cell><cell>n/a</cell><cell>0.07</cell><cell>1,030</cell></row><row><cell></cell><cell>Sat</cell><cell>1696/31589/74007</cell><cell>0.49</cell><cell>&gt;15,000</cell><cell>n/a</cell><cell>0.41</cell><cell>10,551</cell></row><row><cell></cell><cell>Sat+2Sim</cell><cell>1683/83930/178217</cell><cell>0.76</cell><cell>9.38</cell><cell>3,255</cell><cell>0.20</cell><cell>5,105</cell></row><row><cell>ferry8 v01a</cell><cell>Orig</cell><cell>854/14819/34624</cell><cell>n/a</cell><cell>13,162</cell><cell>39,153,348</cell><cell>0.01</cell><cell>277</cell></row><row><cell></cell><cell>3Res</cell><cell>854/14811/34480</cell><cell>0.11</cell><cell>&gt;15,000</cell><cell>n/a</cell><cell>0.01</cell><cell>277</cell></row><row><cell></cell><cell>Hyp</cell><cell>846/38141/81268</cell><cell>0.18</cell><cell>6.11</cell><cell>570</cell><cell>0.02</cell><cell>226</cell></row><row><cell></cell><cell>Hyp+Sat</cell><cell>813/38044/81364</cell><cell>0.36</cell><cell>29.66</cell><cell>2,749</cell><cell>0.02</cell><cell>277</cell></row><row><cell></cell><cell>Hyp+Sat+3Res</cell><cell>813/38028/81196</cell><cell>0.70</cell><cell>0.71</cell><cell>15</cell><cell>0.02</cell><cell>277</cell></row><row><cell></cell><cell>Hyp+Sat+2Sim</cell><cell>813/36583/78442</cell><cell>0.50</cell><cell>0.17</cell><cell>0</cell><cell>0.02</cell><cell>233</cell></row><row><cell>ferry8 
v01i</cell><cell>Orig</cell><cell>1745/31688/73934</cell><cell>n/a</cell><cell>&gt;15,000</cell><cell>n/a</cell><cell>0.55</cell><cell>12,935</cell></row><row><cell></cell><cell>3Res</cell><cell>1745/31680/73790</cell><cell>0.20</cell><cell>&gt;15,000</cell><cell>n/a</cell><cell>0.55</cell><cell>12,935</cell></row><row><cell></cell><cell>Sat</cell><cell>1696/31598/74202</cell><cell>0.25</cell><cell>&gt;15,000</cell><cell>n/a</cell><cell>0.25</cell><cell>7,869</cell></row><row><cell></cell><cell>Sat+2Sim+3Res</cell><cell>1696/96092/202904</cell><cell>1.50</cell><cell>268</cell><cell>68,681</cell><cell>4.06</cell><cell>28,690</cell></row><row><cell>ferry9 ks99a</cell><cell>Orig</cell><cell>1598/21427/43693</cell><cell>n/a</cell><cell>&gt;15,000</cell><cell>n/a</cell><cell>0.01</cell><cell>0</cell></row><row><cell></cell><cell>3Res</cell><cell>1578/21368/43586</cell><cell>0.10</cell><cell>&gt;15,000</cell><cell>n/a</cell><cell>0.01</cell><cell>0</cell></row><row><cell></cell><cell>Hyp</cell><cell>1542/29836/60522</cell><cell>0.21</cell><cell>&gt;15,000</cell><cell>n/a</cell><cell>0.02</cell><cell>278</cell></row><row><cell></cell><cell>Niv</cell><cell>1244/21046/43264</cell><cell>0.01</cell><cell>&gt;15,000</cell><cell>n/a</cell><cell>0.01</cell><cell>29</cell></row><row><cell></cell><cell>Sat</cell><cell>1042/20765/48878</cell><cell>0.33</cell><cell>&gt;15,000</cell><cell>n/a</cell><cell>0.02</cell><cell>350</cell></row><row><cell></cell><cell>2Sim</cell><cell>1569/20563/41976</cell><cell>0.02</cell><cell>&gt;15,000</cell><cell>n/a</cell><cell>0.04</cell><cell>1,359</cell></row><row><cell></cell><cell>Hyp+Sat</cell><cell>1056/26902/72553</cell><cell>0.88</cell><cell>33.73</cell><cell>22,929</cell><cell>0.03</cell><cell>609</cell></row><row><cell></cell><cell>Sat+2Sim</cell><cell>1042/50487/108322</cell><cell>0.50</cell><cell>0.18</cell><cell>5</cell><cell>0.03</cell><cell>261</cell></row><row><cell>ferry9 
v01a</cell><cell>Orig</cell><cell>1088/20878/48771</cell><cell>n/a</cell><cell>&gt;15,000</cell><cell>n/a</cell><cell>0.01</cell><cell>181</cell></row><row><cell></cell><cell>3Res</cell><cell>1088/20869/48591</cell><cell>0.16</cell><cell>&gt;15,000</cell><cell>n/a</cell><cell>0.01</cell><cell>181</cell></row><row><cell></cell><cell>Hyp</cell><cell>1079/55371/117757</cell><cell>0.28</cell><cell>0.42</cell><cell>0</cell><cell>0.03</cell><cell>187</cell></row><row><cell></cell><cell>Hyp+Sat</cell><cell>1042/55256/117861</cell><cell>0.49</cell><cell>70.83</cell><cell>5,080</cell><cell>0.03</cell><cell>181</cell></row><row><cell></cell><cell>Hyp+Sat+2Sim</cell><cell>1042/53394/114135</cell><cell>0.72</cell><cell>0.39</cell><cell>2</cell><cell>0.03</cell><cell>234</cell></row><row><cell>ferry10 ks99a</cell><cell>Orig</cell><cell>1977/29041/59135</cell><cell>n/a</cell><cell>&gt;15,000</cell><cell>n/a</cell><cell>0.03</cell><cell>710</cell></row><row><cell></cell><cell>3Res</cell><cell>1955/28976/59017</cell><cell>0.13</cell><cell>&gt;15,000</cell><cell>n/a</cell><cell>0.03</cell><cell>827</cell></row><row><cell></cell><cell>Hyp</cell><cell>1915/40743/82551</cell><cell>0.29</cell><cell>&gt;15,000</cell><cell>n/a</cell><cell>0.04</cell><cell>563</cell></row><row><cell></cell><cell>Niv</cell><cell>1544/28578/58619</cell><cell>0.02</cell><cell>&gt;15,000</cell><cell>n/a</cell><cell>0.01</cell><cell>0</cell></row><row><cell></cell><cell>Sat</cell><cell>1299/28246/66432</cell><cell>0.44</cell><cell>&gt;15,000</cell><cell>n/a</cell><cell>0.03</cell><cell>909</cell></row><row><cell></cell><cell>2Sim</cell><cell>1945/27992/57049</cell><cell>0.05</cell><cell>&gt;15,000</cell><cell>n/a</cell><cell>0.05</cell><cell>1,565</cell></row><row><cell></cell><cell>Sat+2Sim</cell><cell>1299/69894/149728</cell><cell>0.69</cell><cell>0.28</cell><cell>1</cell><cell>0.04</cell><cell>419</cell></row><row><cell></cell><cell>3Res+2Sim+Niv</cell><cell>1793/21099/43369</cell><cell>0.43</cell><cell>0.08
</cell><cell>0</cell><cell>0.06</cell><cell>1,278</cell></row><row><cell></cell><cell>Niv+Hyp+2Sim+3Res</cell><cell>1532/24524/50463</cell><cell>0.54</cell><cell>5.19</cell><cell>3,949</cell><cell>0.02</cell><cell>454</cell></row><row><cell>ferry10 v01a</cell><cell>Orig</cell><cell>1350/28371/66258</cell><cell>n/a</cell><cell>&gt;15,000</cell><cell>n/a</cell><cell>0.02</cell><cell>191</cell></row><row><cell></cell><cell>3Res</cell><cell>1350/28361/66038</cell><cell>0.23</cell><cell>&gt;15,000</cell><cell>n/a</cell><cell>0.02</cell><cell>191</cell></row><row><cell></cell><cell>Hyp</cell><cell>1340/77030/163576</cell><cell>0.40</cell><cell>4.90</cell><cell>550</cell><cell>0.04</cell><cell>401</cell></row><row><cell></cell><cell>Hyp+Sat+3Res</cell><cell>1299/76874/163442</cell><cell>1.56</cell><cell>1,643</cell><cell>118,635</cell><cell>0.04</cell><cell>343</cell></row><row><cell></cell><cell>Hyp+Sat+2Sim</cell><cell>1299/74615/159134</cell><cell>1.00</cell><cell>1.78</cell><cell>61</cell><cell>0.04</cell><cell>459</cell></row></table></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" type="table" xml:id="tab_24"><head>Table 3 :</head><label>3</label><figDesc>Dew Satz and MINISAT performance, before and after preprocessing, on hard BMC instances.</figDesc><table><row><cell>Instance</cell><cell>Prep.</cell><cell cols="2">#Vars/#Cls/#Lits Ptime</cell><cell cols="2">Dew Satz</cell><cell cols="2">MINISAT</cell></row><row><cell></cell><cell></cell><cell></cell><cell></cell><cell>Stime</cell><cell>#BackT</cell><cell>Stime</cell><cell>#Conflict</cell></row><row><cell>01-k10</cell><cell>Orig</cell><cell>9275/38802/98468</cell><cell>n/a</cell><cell>18.96</cell><cell>1,472</cell><cell>0.08</cell><cell>313</cell></row><row><cell></cell><cell>Hyp</cell><cell>n/a</cell><cell>1.27</cell><cell>n/a</cell><cell>n/a</cell><cell>n/a</cell><cell>n/a</cell></row><row><cell></cell><cell>Niv</cell><cell>6662/33394/90715</cell><cell>0.16</cell><cell>4.12</cell><cell>366</cell><cell>0.07</cell><cell>327</cell></row><row><cell></cell><cell>3Res</cell><cell>6498/27318/70158</cell><cell>0.24</cell><cell>26.38</cell><cell>2,860</cell><cell>0.07</cell><cell>282</cell></row><row><cell></cell><cell>Sat</cell><cell>3418/19648/62925</cell><cell>0.84</cell><cell>3.46</cell><cell>140</cell><cell>0.03</cell><cell>262</cell></row><row><cell></cell><cell>2Sim</cell><cell>4379/59765/133585</cell><cell>3.41</cell><cell>0.24</cell><cell>1</cell><cell>0.05</cell><cell>135</cell></row><row><cell>01-k15</cell><cell>Orig</cell><cell>11524/48585/123966</cell><cell>n/a</cell><cell>&gt;15,000</cell><cell>n/a</cell><cell>0.49</cell><cell>3,743</cell></row><row><cell></cell><cell>3Res+Sat+</cell><cell></cell><cell></cell><cell></cell><cell></cell><cell></cell><cell></cell></row><row><cell></cell><cell>Niv+Hyp</cell><cell>3382/25936/79364</cell><cell>4.65</cell><cell>1,420</cell><cell>130,013</cell><cell>0.06</cell><cell>682</cell></row><row><cell></cell><cell>3Res+Hyp+</cell><cell></cell><cell></cell><cell></cell><cell></cell><cell></cell><cell></cell><
/row><row><cell></cell><cell>Niv+3Res</cell><cell>4203/23731/60639</cell><cell>4.33</cell><cell>190</cell><cell>13,449</cell><cell>0.06</cell><cell>430</cell></row><row><cell></cell><cell>3Res+Hyp+</cell><cell></cell><cell></cell><cell></cell><cell></cell><cell></cell><cell></cell></row><row><cell></cell><cell>3Res</cell><cell>4732/24972/63133</cell><cell>4.17</cell><cell>262</cell><cell>19,701</cell><cell>0.04</cell><cell>243</cell></row><row><cell></cell><cell>Hyp</cell><cell>4889/27056/71819</cell><cell>3.94</cell><cell>2,937</cell><cell>178,245</cell><cell>0.06</cell><cell>603</cell></row><row><cell></cell><cell>Niv</cell><cell>8068/41368/113317</cell><cell>0.19</cell><cell>&gt;15,000</cell><cell>n/a</cell><cell>0.49</cell><cell>3,548</cell></row><row><cell></cell><cell>3Res</cell><cell>9403/40059/103137</cell><cell>0.27</cell><cell>&gt;15,000</cell><cell>n/a</cell><cell>0.66</cell><cell>3,783</cell></row><row><cell></cell><cell>Sat</cell><cell>5198/30697/97961</cell><cell>1.13</cell><cell>&gt;15,000</cell><cell>n/a</cell><cell>0.32</cell><cell>3,655</cell></row><row><cell>01-k20</cell><cell>Orig</cell><cell>15069/63760/163081</cell><cell>n/a</cell><cell>&gt;15,000</cell><cell>n/a</cell><cell>4.95</cell><cell>16,658</cell></row><row><cell></cell><cell>3Res+Hyp+</cell><cell></cell><cell></cell><cell></cell><cell></cell><cell></cell><cell></cell></row><row><cell></cell><cell>Niv+3Res</cell><cell>6382/34846/89807</cell><cell>6.94</cell><cell>513</cell><cell>29,629</cell><cell>0.20</cell><cell>1,261</cell></row><row><cell></cell><cell>Hyp</cell><cell>7323/39150/104635</cell><cell>6.40</cell><cell>&gt;15,000</cell><cell>n/a</cell><cell>0.95</cell><cell>5,182</cell></row><row><cell></cell><cell>Niv</cell><cell>10533/54293/149192</cell><cell>0.25</cell><cell>&gt;15,000</cell><cell>n/a</cell><cell>0.28</cell><cell>2,069</cell></row><row><cell></cell><cell>3Res</cell><cell>12948/55490/142966</cell><cell>0.37</cell><cell>&gt;15,000</cell><cell>n/a</cell><cell>1.58</cell><
cell>9,341</cell></row><row><cell></cell><cell>Sat</cell><cell>7179/42837/136537</cell><cell>1.52</cell><cell>&gt;15,000</cell><cell>n/a</cell><cell>1.11</cell><cell>8,705</cell></row><row><cell></cell><cell>2Sim</cell><cell>9370/93921/217635</cell><cell>1.91</cell><cell>&gt;15,000</cell><cell>n/a</cell><cell>0.35</cell><cell>1,977</cell></row><row><cell>26-k70</cell><cell>Orig</cell><cell>346561/1752741/4579945</cell><cell>n/a</cell><cell>&gt;15,000</cell><cell>n/a</cell><cell>8,382</cell><cell>2,654,614</cell></row><row><cell></cell><cell>3Res</cell><cell>346561/1756001/4588705</cell><cell>150</cell><cell>21.32</cell><cell>1</cell><cell>1.02</cell><cell>10</cell></row><row><cell></cell><cell>Hyp</cell><cell>243461/1569549/4182061</cell><cell>338</cell><cell>&gt;15,000</cell><cell>n/a</cell><cell>1.22</cell><cell>642</cell></row><row><cell></cell><cell>Niv</cell><cell>155221/1354556/4075072</cell><cell>479</cell><cell>&gt;15,000</cell><cell>n/a</cell><cell>1.07</cell><cell>492</cell></row><row><cell></cell><cell>Sat</cell><cell>132670/1300914/4980854</cell><cell>109</cell><cell>&gt;15,000</cell><cell>n/a</cell><cell>2,325</cell><cell>1,503,271</cell></row><row><cell>26-k75</cell><cell>Orig</cell><cell>371091/1877066/4904440</cell><cell>n/a</cell><cell>&gt;15,000</cell><cell>n/a</cell><cell>8,540</cell><cell>2,880,376</cell></row><row><cell></cell><cell>3Res</cell><cell>371091/1880536/4913780</cell><cell>161</cell><cell>22.81</cell><cell>1</cell><cell>1.06</cell><cell>11</cell></row><row><cell></cell><cell>Hyp</cell><cell>260621/1680704/4477966</cell><cell>364</cell><cell>&gt;15,000</cell><cell>n/a</cell><cell>1.20</cell><cell>654</cell></row><row><cell></cell><cell>Niv</cell><cell>166195/1450679/4364543</cell><cell>4.95</cell><cell>&gt;15,000</cell><cell>n/a</cell><cell>1.41</cell><cell>474</cell></row><row><cell></cell><cell>Sat</cell><cell>141870/1392526/5327557</cell><cell>117</cell><cell>&gt;15,000</cell><cell>n/a</cell><cell>3,896</cell><cell>2,067,948</cell><
/row><row><cell>26-k85</cell><cell>Orig</cell><cell>420151/2125716/5553430</cell><cell>n/a</cell><cell>&gt;15,000</cell><cell>n/a</cell><cell>&gt;15,000</cell><cell>n/a</cell></row><row><cell></cell><cell>3Res</cell><cell>420151/2129606/5563930</cell><cell>183</cell><cell>25.43</cell><cell>1</cell><cell>1.21</cell><cell>10</cell></row><row><cell></cell><cell>Hyp</cell><cell>294941/1903014/5069776</cell><cell>417</cell><cell>&gt;15,000</cell><cell>n/a</cell><cell>1.55</cell><cell>747</cell></row><row><cell></cell><cell>Niv</cell><cell>187631/1641901/4941437</cell><cell>5.69</cell><cell>&gt;15,000</cell><cell>n/a</cell><cell>1.37</cell><cell>535</cell></row><row><cell></cell><cell>Sat</cell><cell>160270/1576770/6039510</cell><cell>132</cell><cell>&gt;15,000</cell><cell>n/a</cell><cell>4,472</cell><cell>2,308,225</cell></row><row><cell>26-k90</cell><cell>Orig</cell><cell>444681/2250041/5877925</cell><cell>n/a</cell><cell>&gt;15,000</cell><cell>n/a</cell><cell>&gt;15,000</cell><cell>n/a</cell></row><row><cell></cell><cell>Niv+3Res</cell><cell>198605/2208074/6624608</cell><cell>121</cell><cell>13.21</cell><cell>1</cell><cell>1.77</cell><cell>5</cell></row><row><cell></cell><cell>Hyp+3Res</cell><cell>312101/1979389/5187311</cell><cell>600</cell><cell>46.53</cell><cell>1</cell><cell>1.39</cell><cell>5</cell></row><row><cell></cell><cell>3Res</cell><cell>444681/2254141/5889005</cell><cell>195</cell><cell>26.99</cell><cell>1</cell><cell>1.26</cell><cell>10</cell></row><row><cell></cell><cell>Hyp</cell><cell>312101/2014169/5365681</cell><cell>446</cell><cell>&gt;15,000</cell><cell>n/a</cell><cell>1.55</cell><cell>583</cell></row><row><cell></cell><cell>Niv</cell><cell>198605/1738024/5230908</cell><cell>5.91</cell><cell>&gt;15,000</cell><cell>n/a</cell><cell>1.38</cell><cell>429</cell></row><row><cell></cell><cell>Sat</cell><cell>169470/1669436/6402318</cell><cell>140</cell><cell>&gt;15,000</cell><cell>n/a</cell><cell>8,240</cell><cell>3,311,629</cell></row></table></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" type="table" xml:id="tab_25"><head>Table 4 :</head><label>4</label><figDesc>Dew Satz and MINISAT performance, before and after preprocessing, on SAT2005 IBM-FV-* instances.</figDesc><table /></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" type="table" xml:id="tab_27"><head>Table 5 :</head><label>5</label><figDesc>Dew Satz and MINISAT performance, without preprocessing, on FPGA routing problems.</figDesc><table /></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" type="table" xml:id="tab_28"><head></head><label></label><figDesc>Table 7 illustrates the difficulty of selecting the order in which to apply multiple preprocessors. It shows results on just two sample problems. The first is the bounded model checking problem BMC-IBM-12, which Dew Satz attempted with the three preprocessors HyPre, NiVER and 3-Resolution in different orders. Only one order, NiVER followed by HyPre followed by 3-Resolution, renders the problem feasible for Dew Satz. With the preprocessors in that order, it is solved in less than 2 minutes; with any other order it cannot be solved in more than four hours. The second problem, ferry10 ks99a, shows the range of different outcomes produced by varying the order of four preprocessors. If we get it right, we get a solution in 5 seconds, but we know of no simple rule for getting it right in such a case. Neither running NiVER first nor running 3-Resolution last is sufficient. Even with NiVER, HyPre and 3-Resolution in the right order, putting 2-SIMPLIFY first rather than third changes the runtime from 5 seconds to several hours. The third experiment illustrates the effect of alternating two preprocessors. 
Simplifying</figDesc><table><row><cell>Instance</cell><cell>Prep.</cell><cell cols="2">#Vars/#Cls/#Lits Ptime</cell><cell cols="2">Dew Satz</cell><cell cols="2">MINISAT</cell></row><row><cell></cell><cell></cell><cell></cell><cell></cell><cell>Stime</cell><cell>#BackT</cell><cell>Stime</cell><cell>#Conflict</cell></row><row><cell>bart28</cell><cell>Orig</cell><cell>428/2907/7929</cell><cell>n/a</cell><cell>0.00</cell><cell>0</cell><cell>&gt;15,000</cell><cell>n/a</cell></row><row><cell></cell><cell>Sat</cell><cell>413/2892/11469</cell><cell>0.06</cell><cell>0.02</cell><cell>0</cell><cell>&gt;15,000</cell><cell>n/a</cell></row><row><cell></cell><cell>Sha</cell><cell>1825/8407/27003</cell><cell>0.37</cell><cell>0.06</cell><cell>9</cell><cell>198</cell><cell>775,639</cell></row><row><cell></cell><cell>Sha+3Res</cell><cell>1764/7702/24400</cell><cell>0.46</cell><cell>0.04</cell><cell>1</cell><cell>2,458</cell><cell>7,676,459</cell></row><row><cell></cell><cell>Sha+Hyp</cell><cell>1764/8349/26138</cell><cell>0.41</cell><cell>0.05</cell><cell>20</cell><cell>&gt;15,000</cell><cell>n/a</cell></row><row><cell></cell><cell>Sha+Niv</cell><cell>1781/8358/26759</cell><cell>0.38</cell><cell>0.05</cell><cell>6</cell><cell>5.46</cell><cell>53,683</cell></row><row><cell></cell><cell>Sha+Sat</cell><cell>1728/8254/30422</cell><cell>0.53</cell><cell>0.10</cell><cell>0</cell><cell>115</cell><cell>684,272</cell></row><row><cell></cell><cell>Sha+2Sim</cell><cell>1750/7892/24682</cell><cell>0.39</cell><cell>0.05</cell><cell>17</cell><cell>19.12</cell><cell>150,838</cell></row><row><cell>bart30</cell><cell>Orig</cell><cell>485/3617/9954</cell><cell>n/a</cell><cell>0.31</cell><cell>20,160</cell><cell>&gt;15,000</cell><cell>n/a</cell></row><row><cell></cell><cell>Sat</cell><cell>468/3600/14544</cell><cell>0.08</cell><cell>0.03</cell><cell>0</cell><cell>&gt;15,000</cell><cell>n/a</cell></row><row><cell></cell><cell>Sha</cell><cell>2017/9649/30874</cell><cell>0.49</cell><cell>0.11</cell><cell>9
6</cell><cell>&gt;15,000</cell><cell>n/a</cell></row><row><cell></cell><cell>Sha+3Res</cell><cell>1945/8686/27492</cell><cell>0.60</cell><cell>0.12</cell><cell>224</cell><cell>4,149</cell><cell>7,594,231</cell></row><row><cell></cell><cell>Sha+Hyp</cell><cell>1945/9348/29218</cell><cell>0.54</cell><cell>11,729</cell><cell>28,270,212</cell><cell>&gt;15,000</cell><cell>n/a</cell></row><row><cell></cell><cell>Sha+Niv</cell><cell>1969/9599/30625</cell><cell>0.50</cell><cell>0.06</cell><cell>1</cell><cell>&gt;15,000</cell><cell>n/a</cell></row><row><cell></cell><cell>Sha+Sat</cell><cell>1776/8830/33533</cell><cell>0.77</cell><cell>0.12</cell><cell>1</cell><cell>&gt;15,000</cell><cell>n/a</cell></row><row><cell></cell><cell>Sha+2Sim</cell><cell>1919/8758/27287</cell><cell>0.51</cell><cell>0.05</cell><cell>9</cell><cell>&gt;15,000</cell><cell>n/a</cell></row><row><cell>homer19</cell><cell>Orig</cell><cell>330/2340/4950</cell><cell>n/a</cell><cell>473</cell><cell>19,958,400</cell><cell>10,233</cell><cell>51,960,410</cell></row><row><cell></cell><cell>Sat</cell><cell>300/2310/8400</cell><cell>0.04</cell><cell>&gt;15,000</cell><cell>n/a</cell><cell>5,621</cell><cell>54,469,568</cell></row><row><cell></cell><cell>Sha</cell><cell>1460/6764/20242</cell><cell>0.16</cell><cell>2,345</cell><cell>4,828,639</cell><cell>2.26</cell><cell>33,492</cell></row><row><cell></cell><cell>Sha+3Res</cell><cell>1388/5748/16914</cell><cell>0.23</cell><cell>3,231</cell><cell>7,189,966</cell><cell>1.14</cell><cell>21,669</cell></row><row><cell></cell><cell>Sha+Hyp</cell><cell>1387/6547/18865</cell><cell>0.20</cell><cell>4,179</cell><cell>9,611,768</cell><cell>1.90</cell><cell>30,418</cell></row><row><cell></cell><cell>Sha+Niv</cell><cell>1412/6715/19993</cell><cell>0.17</cell><cell>2,570</cell><cell>5,202,084</cell><cell>3.15</cell><cell>45,484</cell></row><row><cell></cell><cell>Sha+Sat</cell><cell>1201/5846/19288</cell><cell>0.34</cell><cell>4,071</cell><cell>6,236,966</cell><cell>1.48</cell><cell
>26,517</cell></row><row><cell></cell><cell>Sha+2Sim</cell><cell>1348/5639/16110</cell><cell>0.17</cell><cell>307</cell><cell>678,425</cell><cell>0.70</cell><cell>14,682</cell></row><row><cell>homer20</cell><cell>Orig</cell><cell>440/4220/8800</cell><cell>n/a</cell><cell>941</cell><cell>19,958,400</cell><cell>&gt;15,000</cell><cell>n/a</cell></row><row><cell></cell><cell>Sat</cell><cell>400/4180/15200</cell><cell>0.08</cell><cell>1,443</cell><cell>6,982,425</cell><cell>11,448</cell><cell>57,302,582</cell></row><row><cell></cell><cell>Sha</cell><cell>1999/10340/29988</cell><cell>0.28</cell><cell>369</cell><cell>350,610</cell><cell>1.83</cell><cell>22,950</cell></row><row><cell></cell><cell>Sha+3Res</cell><cell>1907/8793/25027</cell><cell>0.37</cell><cell>362</cell><cell>405,059</cell><cell>1.41</cell><cell>18,273</cell></row><row><cell></cell><cell>Sha+Hyp</cell><cell>1905/10527/29129</cell><cell>0.34</cell><cell>1,306</cell><cell>1,451,567</cell><cell>1.10</cell><cell>13,927</cell></row><row><cell></cell><cell>Sha+Niv</cell><cell>1941/10276/29671</cell><cell>0.29</cell><cell>379</cell><cell>349,842</cell><cell>0.91</cell><cell>13,543</cell></row><row><cell></cell><cell>Sha+Sat</cell><cell>1723/9420/30986</cell><cell>0.54</cell><cell>822</cell><cell>300,605</cell><cell>1.00</cell><cell>13,831</cell></row><row><cell></cell><cell>Sha+2Sim</cell><cell>1879/9419/26188</cell><cell>0.31</cell><cell>114</cell><cell>120,297</cell><cell>0.40</cell><cell>6,612</cell></row></table></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" type="table" xml:id="tab_29"><head>Table 6 :</head><label>6</label><figDesc>Dew Satz and MINISAT performance, before and after preprocessing, on selected FPGA routing instances.</figDesc><table><row><cell>Instance</cell><cell>Prep.</cell><cell cols="2">#Vars/#Cls/#Lits Ptime</cell><cell>Stime</cell><cell>#BackT</cell></row><row><cell>bmc-ibm-12</cell><cell>Hyp+3Res+Niv</cell><cell>10805/83643/204679</cell><cell>96.11</cell><cell>&gt;15,000</cell><cell>n/a</cell></row><row><cell></cell><cell>Niv+Hyp+3Res</cell><cell>12001/100114/253071</cell><cell>85.81</cell><cell>106</cell><cell>6</cell></row><row><cell></cell><cell>3Res+Hyp+Niv</cell><cell>10038/82632/221890</cell><cell>89.56</cell><cell>&gt;15,000</cell><cell>n/a</cell></row><row><cell></cell><cell>3Res+Niv+Hyp</cell><cell>11107/99673/269405</cell><cell>58.38</cell><cell>&gt;15,000</cell><cell>n/a</cell></row><row><cell>ferry10 ks99a</cell><cell>2Sim+Niv+Hyp+3Res</cell><cell>1518/32206/65806</cell><cell>0.43</cell><cell>&gt;15,000</cell><cell>n/a</cell></row><row><cell></cell><cell>Niv+3Res+2Sim+Hyp</cell><cell>1532/25229/51873</cell><cell>0.49</cell><cell>11,345</cell><cell>17,778,483</cell></row><row><cell></cell><cell>3Res+2Sim+Niv+Hyp</cell><cell>1793/20597/42365</cell><cell>0.56</cell><cell>907</cell><cell>1,172,964</cell></row><row><cell></cell><cell>Niv+Hyp+2Sim+3Res</cell><cell>1532/24524/50463</cell><cell>0.54</cell><cell>5.19</cell><cell>3,949</cell></row><row><cell>ferry10 
ks99a</cell><cell>2Sim+Niv</cell><cell>1518/27554/56565</cell><cell>0.08</cell><cell>&gt;15,000</cell><cell>n/a</cell></row><row><cell></cell><cell>2Sim+Niv+2Sim</cell><cell>1518/18988/39433</cell><cell>0.27</cell><cell>3,197</cell><cell>6,066,241</cell></row><row><cell></cell><cell>2Sim+Niv+2Sim+Niv</cell><cell>1486/18956/39429</cell><cell>0.29</cell><cell>129</cell><cell>290,871</cell></row><row><cell></cell><cell>2Sim+Niv+2Sim+Niv+2Sim</cell><cell>1486/23258/48033</cell><cell>0.48</cell><cell>7,355</cell><cell>8,216,100</cell></row></table></figure>
<figure xmlns="http://www.tei-c.org/ns/1.0" type="table" xml:id="tab_30"><head>Table 7 :</head><label>7</label><figDesc>Dew Satz's performance on instances with preprocessor ordering.</figDesc><table /></figure>
			<note xmlns="http://www.tei-c.org/ns/1.0" place="foot" n="1" xml:id="foot_0">This is an understated version of "The prover stops cold".</note>
			<note xmlns="http://www.tei-c.org/ns/1.0" place="foot" n="1" xml:id="foot_1">The LEO-II project at Cambridge University has just started (in October</note>
			<note xmlns="http://www.tei-c.org/ns/1.0" place="foot" n="2006" xml:id="foot_2">). The project is funded by EPSRC under grant EP/D070511/1.</note>
			<note xmlns="http://www.tei-c.org/ns/1.0" place="foot" n="3" xml:id="foot_3">We present a simple first order example here, since the treatment of bound variables is special and is described later in Section 3.4.</note>
			<note xmlns="http://www.tei-c.org/ns/1.0" place="foot" n="4" xml:id="foot_4">The data structure of PSTs can not only be partial, its structure can also exceed the syntax tree of terms as defined in Section 2 if all three children of a node are nonempty. This may be the case when using PSTs to represent coordinates, that is term positions which occur in some term. This is used when building the index as described in Section 3.3, which is similar to Stickel's path indexing and coordinate indexing methods<ref type="bibr" target="#b33">[Sti89,</ref><ref type="bibr" target="#b24">McC92]</ref>.</note>
			<note xmlns="http://www.tei-c.org/ns/1.0" place="foot" n="5" xml:id="foot_5">In the current implementation, both occurring symbols and nonprimitive subterms are indexed. However, we plan to further evaluate the tradeoff between the speedup gained this way and the cost for maintenance of the index. Depending on this evaluation, we may want to restrict the nonprimitive subterms to be indexed, for example, by using their size as a criterion. Similar ideas have been examined by McCune<ref type="bibr" target="#b24">[McC92]</ref>, for example, the effect of limitations on the length of paths used in path indexing.</note>
			<note xmlns="http://www.tei-c.org/ns/1.0" place="foot" n="1" xml:id="foot_6">Other extension principles include the introduction of Skolem (choice) functions and specification of a formula as an axiom. The latter is discouraged since one can introduce unsoundness by adding arbitrary axioms. For this paper, we will ignore the possibility of introducing arbitrary axioms.</note>
			<note xmlns="http://www.tei-c.org/ns/1.0" place="foot" n="2" xml:id="foot_7">The formal definition of a clause processor is somewhat more complex. In particular, it can optionally take as argument the current ACL2 state among others, and return, in addition to the list of clauses, an error message and possibly a new ACL2 state. We will ignore such details in this paper.</note>
		</body>
		<back>

			<div type="acknowledgement">
<div xmlns="http://www.tei-c.org/ns/1.0"><head>Acknowledgements</head><p>We wish to thank the following for their contribution to the success of this conference: Air Force Office of Scientific Research, Asian Office of Aerospace Research and Development.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head>Acknowledgements</head><p>The authors thank Peter Dillinger, Robert Krug, Pete Manolios, John Matthews, and Rob Sumners for comments and feedback.</p><p>Acknowledgments. The author would like to thank Markus Wenzel for several good ideas and his extensive help with tuning the implementation, and Hasan Amjad for more good ideas and the challenge that he posed. The clause representations used in this paper were suggested to me by various people, including Hasan Amjad, John Harrison, John Matthews, and Markus Wenzel.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head>Acknowledgements</head><p>We thank anonymous reviewers for their comments on the previous versions of this paper. We also thank Zhengzhu Feng for fruitful discussions and for providing us with the executable of the symbolic LAO * planner. Olga Skvortsova was supported by a grant within the Graduate Programme GRK 334 "Specification of discrete processes and systems of processes by operational models and logics" under auspices of the Deutsche Forschungsgemeinschaft (DFG).</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head>Acknowledgments</head><p>This work was funded by National ICT Australia (NICTA). National ICT Australia is funded through the Australian Government's Backing Australia's Ability initiative, in part through the Australian Research Council.</p></div>
			</div>


			<div type="funding">
<div xmlns="http://www.tei-c.org/ns/1.0"><p>* This material is based upon work supported by DARPA and the National Science Foundation under Grant No. CNS-0429591, by the National Science Foundation under Grant No. ISS-0417413, and by DARPA under Contract No. NBCH30390004.</p></div>
			</div>

			<div type="annex">
<div xmlns="http://www.tei-c.org/ns/1.0"><head>Multiple Preprocessing for Systematic SAT Solvers</head><p>Anbulagan 1 , John Slaney 1,2 1 Logic and Computation Program, National ICT Australia Ltd. 2 Computer Sciences Laboratory, Australian National University {anbulagan, john.slaney}@nicta.com.au</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head>Abstract</head><p>High-performance SAT solvers based on systematic search generally use either conflict driven clause learning (CDCL) or lookahead techniques to gain efficiency. Both styles of reasoning can gain from a preprocessing phase in which some form of deduction is used to simplify the problem. In this paper we undertake an empirical examination of the effects of several recently proposed preprocessors on both CDCL and lookahead-based SAT solvers. One finding is that the use of multiple preprocessors one after the other can be much more effective than using any one of them alone, but that the order in which they are applied is significant. We intend our results to be particularly useful to those implementing new preprocessors and solvers. with 2-SIMPLIFY followed by NiVER is insufficient to allow solution before the timeout. Simplifying again with 2-SIMPLIFY brings the runtime down to under an hour; adding NiVER again brings it down again to a couple of minutes; repeating 2-SIMPLIFY, far from improving matters, causes the time to blow out to two hours.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="6">Conclusions</head><p>We performed an empirical study of the effects of several recently proposed SAT preprocessors on both CDCL and lookahead-based SAT solvers. We describe several outcomes from this study as follows.</p><p>1. High-performance SAT solvers, whether they depend on clause learning or on lookahead, benefit greatly from preprocessing. Improvements of four orders of magnitude in runtimes are not uncommon.</p><p>2. It is unlikely to equip a SAT solver with just one preprocessor of the kind considered in this paper. Very different preprocessing techniques are appropriate to different problem classes.</p><p>3. There are frequently benefits to be gained from running two or more preprocessors in series on the same problem instance.</p><p>4. Both clause learning and lookahead need to be enhanced with techniques specific to reasoning with binary clauses, in order to exploit dependency chains, and with techniques for equality reasoning.</p><p>5. Lookahead-based solvers also benefit greatly from resolution between longer clauses, as in the 3-Resolution preprocessor. This seems to capture ahead of the search some of the inferences which would be achieved during it by learning clauses. CDCL solvers can also benefit from 3-Resolution preprocessor-dramatically in certain instances-but the effects are far from uniform.</p></div>
<div xmlns="http://www.tei-c.org/ns/1.0"><head n="6.1">Future work</head><p>The following lines of research are open:</p><p>1. It would, of course, be easy if tedious to extend the experiments to more problem sets, more preprocessors and especially to more solvers. We shall probably look at some more DPLL solvers, but do not expect the results to add much more than detail to what is reported in the present paper. One of the more important additions to the class of solvers will be a non-clausal (Boolean circuit) reasoner.</p><p>We have not yet experimented with such a solver. We have already investigated preprocessing for several state of the art SLS (stochastic local search) solvers, but that is such a different game that we regard it as a different experiment and do not report it here.</p><p>2. The more important line of research is to investigate methods for automatically choosing among the available preprocessors for a given problem instance, and</p></div>			</div>
			<div type="references">

				<listBibl>

<biblStruct xml:id="b0">
	<analytic>
		<title level="a" type="main">DISCOUNT: A System for Distributed Equational Deduction</title>
		<author>
			<persName><forename type="first">J</forename><surname>Avenhaus</surname></persName>
		</author>
		<author>
			<persName><forename type="first">J</forename><surname>Denzinger</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Fuchs</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Proc. of the 6th RTA, Kaiserslautern</title>
				<editor>
			<persName><forename type="first">J</forename><surname>Hsiang</surname></persName>
		</editor>
		<meeting>of the 6th RTA, Kaiserslautern</meeting>
		<imprint>
			<publisher>Springer</publisher>
			<date type="published" when="1995">1995</date>
			<biblScope unit="volume">914</biblScope>
			<biblScope unit="page" from="397" to="402" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b1">
	<analytic>
		<title level="a" type="main">On Restrictions of Ordered Paramodulation with Simplification</title>
		<author>
			<persName><forename type="first">L</forename><surname>Bachmair</surname></persName>
		</author>
		<author>
			<persName><forename type="first">H</forename><surname>Ganzinger</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Proc. of the 10th CADE, Kaiserslautern</title>
				<editor>
			<persName><forename type="first">M</forename><forename type="middle">E</forename><surname>Stickel</surname></persName>
		</editor>
		<meeting>of the 10th CADE, Kaiserslautern</meeting>
		<imprint>
			<publisher>Springer</publisher>
			<date type="published" when="1990">1990</date>
			<biblScope unit="volume">449</biblScope>
			<biblScope unit="page" from="427" to="441" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b2">
	<analytic>
		<title level="a" type="main">Rewrite-Based Equational Theorem Proving with Selection and Simplification</title>
		<author>
			<persName><forename type="first">L</forename><surname>Bachmair</surname></persName>
		</author>
		<author>
			<persName><forename type="first">H</forename><surname>Ganzinger</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">Journal of Logic and Computation</title>
		<imprint>
			<biblScope unit="volume">3</biblScope>
			<biblScope unit="issue">4</biblScope>
			<biblScope unit="page" from="217" to="247" />
			<date type="published" when="1994">1994</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b3">
	<analytic>
		<title level="a" type="main">Equational Reasoning in Saturation-Based Theorem Proving</title>
		<author>
			<persName><forename type="first">L</forename><surname>Bachmair</surname></persName>
		</author>
		<author>
			<persName><forename type="first">H</forename><surname>Ganzinger</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Automated Deduction -A Basis for Applications</title>
		<title level="s">Applied Logic Series</title>
		<editor>
			<persName><forename type="first">W</forename><surname>Bibel</surname></persName>
		</editor>
		<editor>
			<persName><forename type="first">P</forename><forename type="middle">H</forename><surname>Schmitt</surname></persName>
		</editor>
		<imprint>
			<publisher>Kluwer Academic Publishers</publisher>
			<date type="published" when="1998">1998</date>
			<biblScope unit="volume">9</biblScope>
			<biblScope unit="page" from="353" to="397" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b4">
	<analytic>
		<title level="a" type="main">DISCOUNT: A Distributed and Learning Equational Prover</title>
		<author>
			<persName><forename type="first">J</forename><surname>Denzinger</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Kronenburg</surname></persName>
		</author>
		<author>
			<persName><forename type="first">S</forename><surname>Schulz</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">Journal of Automated Reasoning</title>
		<imprint>
			<biblScope unit="volume">2</biblScope>
			<biblScope unit="issue">18</biblScope>
			<biblScope unit="page" from="189" to="198" />
			<date type="published" when="1997">1997</date>
		</imprint>
	</monogr>
	<note>Special Issue on the CADE 13 ATP System Competition</note>
</biblStruct>

<biblStruct xml:id="b5">
	<analytic>
		<title level="a" type="main">The New Waldmeister Loop At Work</title>
		<author>
			<persName><forename type="first">J</forename><forename type="middle">M</forename><surname>Gaillourdet</surname></persName>
		</author>
		<author>
			<persName><surname>Th</surname></persName>
		</author>
		<author>
			<persName><forename type="first">B</forename><surname>Hillenbrand</surname></persName>
		</author>
		<author>
			<persName><forename type="first">H</forename><surname>Löchner</surname></persName>
		</author>
		<author>
			<persName><surname>Spies</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Proc. of the 19th CADE</title>
				<editor>
			<persName><forename type="first">F</forename><surname>Bader</surname></persName>
		</editor>
		<meeting>of the 19th CADE<address><addrLine>Miami</addrLine></address></meeting>
		<imprint>
			<publisher>Springer</publisher>
			<date type="published" when="2003">2003</date>
			<biblScope unit="volume">2741</biblScope>
			<biblScope unit="page" from="317" to="321" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b6">
	<analytic>
		<title level="a" type="main">NP-Completeness of the Set Unification and Matching Problems</title>
		<author>
			<persName><forename type="first">D</forename><surname>Kapur</surname></persName>
		</author>
		<author>
			<persName><forename type="first">P</forename><surname>Narendran</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Proc. of the 8th CADE, Oxford</title>
				<editor>
			<persName><forename type="first">J</forename><forename type="middle">H</forename><surname>Siekmann</surname></persName>
		</editor>
		<meeting>of the 8th CADE, Oxford</meeting>
		<imprint>
			<publisher>Springer</publisher>
			<date type="published" when="1986">1986</date>
			<biblScope unit="volume">230</biblScope>
			<biblScope unit="page" from="489" to="495" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b7">
	<analytic>
		<title level="a" type="main">Otter: The CADE-13 Competition Incarnations</title>
		<author>
			<persName><forename type="first">W</forename><forename type="middle">W</forename><surname>Mccune</surname></persName>
		</author>
		<author>
			<persName><forename type="first">L</forename><surname>Wos</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">Journal of Automated Reasoning</title>
		<imprint>
			<biblScope unit="volume">18</biblScope>
			<biblScope unit="issue">2</biblScope>
			<biblScope unit="page" from="211" to="220" />
			<date type="published" when="1997">1997</date>
		</imprint>
	</monogr>
	<note>Special Issue on the CADE 13 ATP System Competition</note>
</biblStruct>

<biblStruct xml:id="b8">
	<analytic>
		<title level="a" type="main">Paramodulation-Based Theorem Proving</title>
		<author>
			<persName><forename type="first">R</forename><surname>Nieuwenhuis</surname></persName>
		</author>
		<author>
			<persName><forename type="first">A</forename><surname>Rubio</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Handbook of Automated Reasoning, volume I</title>
				<editor>
			<persName><forename type="first">A</forename><surname>Robinson</surname></persName>
		</editor>
		<editor>
			<persName><forename type="first">A</forename><surname>Voronkov</surname></persName>
		</editor>
		<imprint>
			<publisher>Elsevier Science and MIT Press</publisher>
			<date type="published" when="2001">2001</date>
			<biblScope unit="volume">7</biblScope>
			<biblScope unit="page" from="371" to="443" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b9">
	<analytic>
		<title level="a" type="main">Vampire 1.1 (System Description)</title>
		<author>
			<persName><forename type="first">A</forename><surname>Riazanov</surname></persName>
		</author>
		<author>
			<persName><forename type="first">A</forename><surname>Voronkov</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Proc. of the 1st IJCAR</title>
				<editor>
			<persName><forename type="first">R</forename><surname>Goré</surname></persName>
		</editor>
		<editor>
			<persName><forename type="first">A</forename><surname>Leitsch</surname></persName>
		</editor>
		<editor>
			<persName><forename type="first">T</forename><surname>Nipkow</surname></persName>
		</editor>
		<meeting>of the 1st IJCAR<address><addrLine>Siena</addrLine></address></meeting>
		<imprint>
			<publisher>Springer</publisher>
			<date type="published" when="2001">2083. 2001</date>
			<biblScope unit="page" from="376" to="380" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b10">
	<analytic>
		<title level="a" type="main">Limited resource strategy in resolution theorem proving</title>
		<author>
			<persName><forename type="first">A</forename><surname>Riazanov</surname></persName>
		</author>
		<author>
			<persName><forename type="first">A</forename><surname>Voronkov</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">Journal of Symbolic Computation</title>
		<imprint>
			<biblScope unit="volume">36</biblScope>
			<biblScope unit="issue">1-2</biblScope>
			<biblScope unit="page" from="101" to="115" />
			<date type="published" when="2003">2003</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b11">
	<analytic>
		<title level="a" type="main">E -A Brainiac Theorem Prover</title>
		<author>
			<persName><forename type="first">S</forename><surname>Schulz</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">Journal of AI Communications</title>
		<imprint>
			<biblScope unit="volume">15</biblScope>
			<biblScope unit="issue">2/3</biblScope>
			<biblScope unit="page" from="111" to="126" />
			<date type="published" when="2002">2002</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b12">
	<analytic>
		<title level="a" type="main">Simple and Efficient Clause Subsumption with Feature Vector Indexing</title>
		<author>
			<persName><forename type="first">S</forename><surname>Schulz</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Proc. of the IJCAR-2004 Workshop on Empirically Successful First-Order Theorem Proving</title>
				<editor>
			<persName><forename type="first">G</forename><surname>Sutcliffe</surname></persName>
		</editor>
		<editor>
			<persName><forename type="first">S</forename><surname>Schulz</surname></persName>
		</editor>
		<editor>
			<persName><forename type="first">T</forename><surname>Tammet</surname></persName>
		</editor>
		<meeting>of the IJCAR-2004 Workshop on Empirically Successful First-Order Theorem Proving<address><addrLine>Cork, Ireland</addrLine></address></meeting>
		<imprint>
			<publisher>Elsevier Science</publisher>
			<date type="published" when="2004">2004</date>
		</imprint>
	</monogr>
	<note>to appear</note>
</biblStruct>

<biblStruct xml:id="b13">
	<analytic>
		<title level="a" type="main">Explicit substitutions</title>
		<author>
			<persName><forename type="first">Martín</forename><surname>Abadi</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Luca</forename><surname>Cardelli</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Pierre-Louis</forename><surname>Curien</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Jean-Jacques</forename><surname>Lèvy</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Conference Record of the Seventeenth Annual ACM Symposium on Principles of Programming Languages</title>
				<meeting><address><addrLine>San Francisco, California</addrLine></address></meeting>
		<imprint>
			<publisher>ACM</publisher>
			<date type="published" when="1990">1990</date>
			<biblScope unit="page" from="31" to="46" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b14">
	<monogr>
		<idno>X3.168-1989</idno>
		<title level="m">American national standard for information systems: database language -SQL: ANSI X3</title>
				<imprint>
			<publisher>pub-ANSI</publisher>
			<date type="published" when="1989">1992. October 3, 1989</date>
			<biblScope unit="page" from="135" to="1992" />
		</imprint>
		<respStmt>
			<orgName>American National Standards Institute</orgName>
		</respStmt>
	</monogr>
	<note>Revision and consolidation of ANSI X3.135-1989 and ANSI</note>
</biblStruct>

<biblStruct xml:id="b15">
	<analytic>
		<title level="a" type="main">LEO -a higher-order theorem prover</title>
		<author>
			<persName><forename type="first">Christoph</forename><surname>Benzmüller</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Michael</forename><surname>Kohlhase</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Proceedings of the 15th International Conference on Automated Deduction (CADE-15), number 1421 in LNAI</title>
				<editor>
			<persName><forename type="first">Claude</forename><surname>Kirchner</surname></persName>
		</editor>
		<editor>
			<persName><forename type="first">Hélène</forename><surname>Kirchner</surname></persName>
		</editor>
		<meeting>the 15th International Conference on Automated Deduction (CADE-15), number 1421 in LNAI<address><addrLine>Lindau, Germany</addrLine></address></meeting>
		<imprint>
			<publisher>Springer</publisher>
			<date type="published" when="1998">1998</date>
			<biblScope unit="page" from="139" to="143" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b16">
	<analytic>
		<title level="a" type="main">A formulation of the simple theory of types</title>
		<author>
			<persName><forename type="first">Alonzo</forename><surname>Church</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">Journal of Symbolic Logic</title>
		<imprint>
			<biblScope unit="volume">5</biblScope>
			<biblScope unit="page" from="56" to="68" />
			<date type="published" when="1940">1940</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b17">
	<monogr>
		<title level="m" type="main">A linear spine calculus</title>
		<author>
			<persName><forename type="first">Iliano</forename><surname>Cervesato</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Frank</forename><surname>Pfenning</surname></persName>
		</author>
		<idno>CMU-CS-97-125</idno>
		<imprint>
			<date type="published" when="1997">1997</date>
			<pubPlace>Pittsburgh, PA</pubPlace>
		</imprint>
	</monogr>
	<note type="report_type">Technical Report</note>
</biblStruct>

<biblStruct xml:id="b18">
	<analytic>
		<title level="a" type="main">Lambda-calculus notation with nameless dummies: a tool for automatic formula manipulation with application to the Church-Rosser theorem</title>
		<author>
			<persName><forename type="first">N</forename><forename type="middle">G</forename><surname>De Bruijn</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">Indag. Math</title>
		<imprint>
			<biblScope unit="volume">34</biblScope>
			<biblScope unit="issue">5</biblScope>
			<biblScope unit="page" from="381" to="392" />
			<date type="published" when="1972">1972</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b19">
	<analytic>
		<title level="a" type="main">Lambda-calculi with explicit substitutions and composition which preserve beta-strong normalization</title>
		<author>
			<persName><forename type="first">Maria</forename><forename type="middle">C F</forename><surname>Ferreira</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Delia</forename><surname>Kesner</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Laurence</forename><surname>Puel</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Algebraic and Logic Programming</title>
				<imprint>
			<date type="published" when="1996">1996</date>
			<biblScope unit="page" from="284" to="298" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b20">
	<analytic>
		<title level="a" type="main">Extending the TPTP language to higher-order logic with automated parser generation</title>
		<author>
			<persName><forename type="first">Allen</forename><surname>Van Gelder</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Geoff</forename><surname>Sutcliffe</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Proceedings of IJCAR</title>
				<meeting>IJCAR</meeting>
		<imprint>
			<date type="published" when="2006">2006</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b21">
	<analytic>
		<title level="a" type="main">A unification algorithm for typed λ-calculus</title>
		<author>
			<persName><forename type="first">G</forename><surname>Huet</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">Theoretical Computer Science</title>
		<imprint>
			<biblScope unit="volume">1</biblScope>
			<biblScope unit="page" from="27" to="57" />
			<date type="published" when="1975">1975</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b22">
	<monogr>
		<title level="m" type="main">Grundlagen der Analysis</title>
		<author>
			<persName><forename type="first">Edmund</forename><forename type="middle">Yehezkel</forename><surname>Landau</surname></persName>
		</author>
		<imprint>
			<date type="published" when="1930">1930</date>
			<publisher>Erstveröff</publisher>
			<pubPlace>Leipzig</pubPlace>
		</imprint>
	</monogr>
	<note>first edition</note>
</biblStruct>

<biblStruct xml:id="b23">
	<monogr>
		<title level="m" type="main">The Objective Caml system</title>
		<author>
			<persName><forename type="first">Xavier</forename><surname>Leroy</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Damien</forename><surname>Doligez</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Jacques</forename><surname>Garrigue</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Didier</forename><surname>Rémy</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Jérôme</forename><surname>Vouillon</surname></persName>
		</author>
		<imprint>
			<date type="published" when="2005-10">October 2005</date>
		</imprint>
	</monogr>
	<note>release 3</note>
</biblStruct>

<biblStruct xml:id="b24">
	<analytic>
		<title level="a" type="main">Experiments with discrimination-tree indexing and path indexing for term retrieval</title>
		<author>
			<persName><forename type="first">William</forename><surname>Mccune</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">Journal of Automated Reasoning</title>
		<imprint>
			<biblScope unit="volume">9</biblScope>
			<biblScope unit="issue">2</biblScope>
			<biblScope unit="page" from="147" to="167" />
			<date type="published" when="1992">1992</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b25">
	<monogr>
		<title level="m" type="main">The suspension notation for lambda terms and its use in metalanguage implementations</title>
		<author>
			<persName><forename type="first">G</forename><surname>Nadathur</surname></persName>
		</author>
		<imprint>
			<date type="published" when="2002">2002</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b26">
	<analytic>
		<title level="a" type="main">On the evaluation of indexing techniques for theorem proving</title>
		<author>
			<persName><forename type="first">R</forename><surname>Nieuwenhuis</surname></persName>
		</author>
		<author>
			<persName><forename type="first">T</forename><surname>Hillenbrand</surname></persName>
		</author>
		<author>
			<persName><forename type="first">A</forename><surname>Riazanov</surname></persName>
		</author>
		<author>
			<persName><forename type="first">A</forename><surname>Voronkov</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Proceedings of IJCAR 2001</title>
				<meeting>IJCAR 2001</meeting>
		<imprint>
			<publisher>Springer Verlag</publisher>
			<date type="published" when="2001">2001</date>
			<biblScope unit="volume">2083</biblScope>
			<biblScope unit="page" from="257" to="271" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b27">
	<analytic>
		<title level="a" type="main">Higher-order substitution tree indexing</title>
		<author>
			<persName><forename type="first">Brigitte</forename><surname>Pientka</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">ICLP</title>
		<title level="s">Lecture Notes in Computer Science</title>
		<editor>
			<persName><forename type="first">Catuscia</forename><surname>Palamidessi</surname></persName>
		</editor>
		<imprint>
			<publisher>Springer</publisher>
			<date type="published" when="2003">2916. 2003</date>
			<biblScope unit="page" from="377" to="391" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b28">
	<monogr>
		<title level="m" type="main">Optimizing higher-order pattern unification</title>
		<author>
			<persName><forename type="first">B</forename><surname>Pientka</surname></persName>
		</author>
		<author>
			<persName><forename type="first">F</forename><surname>Pfenning</surname></persName>
		</author>
		<imprint>
			<date type="published" when="2003">2003</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b29">
	<analytic>
		<title level="a" type="main">Term indexing</title>
		<author>
			<persName><forename type="first">I</forename><forename type="middle">V</forename><surname>Ramakrishnan</surname></persName>
		</author>
		<author>
			<persName><forename type="first">R</forename><forename type="middle">C</forename><surname>Sekar</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Andrei</forename><surname>Voronkov</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Handbook of Automated Reasoning</title>
				<editor>
			<persName><forename type="first">John</forename><surname>Alan</surname></persName>
		</editor>
		<editor>
			<persName><forename type="first">Robinson</forename></persName>
		</editor>
		<editor>
			<persName><forename type="first">Andrei</forename><surname>Voronkov</surname></persName>
		</editor>
		<imprint>
			<publisher>Elsevier and MIT Press</publisher>
			<date type="published" when="2001">2001</date>
			<biblScope unit="page" from="1853" to="1964" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b30">
	<analytic>
		<title level="a" type="main">The design and implementation of Vampire</title>
		<author>
			<persName><forename type="first">A</forename><surname>Riazanov</surname></persName>
		</author>
		<author>
			<persName><forename type="first">A</forename><surname>Voronkov</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">AICOM</title>
		<imprint>
			<biblScope unit="volume">15</biblScope>
			<biblScope unit="issue">2-3</biblScope>
			<biblScope unit="page" from="91" to="110" />
			<date type="published" when="2002-06">jun 2002</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b31">
	<analytic>
		<title level="a" type="main">E — A Brainiac Theorem Prover</title>
		<author>
			<persName><forename type="first">S</forename><surname>Schulz</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">Journal of AI Communications</title>
		<imprint>
			<biblScope unit="volume">15</biblScope>
			<biblScope unit="issue">2/3</biblScope>
			<biblScope unit="page" from="111" to="126" />
			<date type="published" when="2002">2002</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b32">
	<analytic>
		<title level="a" type="main">Higher-order unification revisited: Complete sets of transformations</title>
		<author>
			<persName><forename type="first">W</forename><surname>Snyder</surname></persName>
		</author>
		<author>
			<persName><forename type="first">J</forename><surname>Gallier</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">Journal of Symbolic Computation</title>
		<imprint>
			<biblScope unit="volume">8</biblScope>
			<biblScope unit="issue">2</biblScope>
			<biblScope unit="page" from="101" to="114" />
			<date type="published" when="1989">1989</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b33">
	<analytic>
		<title level="a" type="main">The path-indexing method for indexing terms</title>
		<author>
			<persName><forename type="first">M</forename><surname>Stickel</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Artificial Intelligence Center, SRI International</title>
				<meeting><address><addrLine>Ravenswood Ave., Menlo Park, CA 94025</addrLine></address></meeting>
		<imprint>
			<date type="published" when="1989">1989</date>
			<biblScope unit="volume">333</biblScope>
		</imprint>
	</monogr>
	<note type="report_type">Technical Report 473</note>
</biblStruct>

<biblStruct xml:id="b34">
	<analytic>
		<title level="a" type="main">Interfacing to computer algebra via term indexing</title>
		<author>
			<persName><forename type="first">Frank</forename><surname>Theiß</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Volker</forename><surname>Sorge</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Martin</forename><surname>Pollet</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Proceedings of Calculemus</title>
				<meeting>Calculemus</meeting>
		<imprint>
			<publisher>Elsevier</publisher>
			<date type="published" when="2006">2006</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b35">
	<monogr>
		<title level="m" type="main">Checking Landau&apos;s &quot;Grundlagen&quot; in the Automath system</title>
		<author>
			<persName><forename type="first">L</forename><forename type="middle">S</forename><surname>Van Benthem Jutting</surname></persName>
		</author>
		<imprint>
			<date type="published" when="1977">1977</date>
		</imprint>
		<respStmt>
			<orgName>Technische Hogeschool Eindhoven, Stichting Mathematisch Centrum</orgName>
		</respStmt>
	</monogr>
	<note type="report_type">PhD thesis</note>
	<note>See also. Lan30</note>
</biblStruct>

<biblStruct xml:id="b36">
	<analytic>
		<title level="a" type="main">SPASS version 2.0</title>
		<author>
			<persName><forename type="first">Ch</forename><surname>Weidenbach</surname></persName>
		</author>
		<author>
			<persName><forename type="first">U</forename><surname>Brahm</surname></persName>
		</author>
		<author>
			<persName><forename type="first">T</forename><surname>Hillenbrand</surname></persName>
		</author>
		<author>
			<persName><forename type="first">E</forename><surname>Keen</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Ch</forename><surname>Theobald</surname></persName>
		</author>
		<author>
			<persName><forename type="first">D</forename><surname>Topić</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Proceedings of CADE 2002</title>
				<meeting>CADE 2002</meeting>
		<imprint>
			<date type="published" when="2002">2002</date>
			<biblScope unit="page" from="275" to="279" />
		</imprint>
	</monogr>
	<note>Topic</note>
</biblStruct>

<biblStruct xml:id="b37">
	<analytic>
		<title level="a" type="main">Combining WS1S and HOL</title>
		<author>
			<persName><forename type="first">D</forename><surname>Basin</surname></persName>
		</author>
		<author>
			<persName><forename type="first">S</forename><surname>Friedrich</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Frontiers of Combining Systems 2</title>
				<editor>
			<persName><forename type="first">D</forename><forename type="middle">M</forename><surname>Gabbay</surname></persName>
		</editor>
		<editor>
			<persName><forename type="first">M</forename><surname>De Rijke</surname></persName>
		</editor>
		<meeting><address><addrLine>Baldock, Herts, UK</addrLine></address></meeting>
		<imprint>
			<publisher>Research Studies Press/Wiley</publisher>
			<date type="published" when="2000-02">February 2000</date>
			<biblScope unit="page" from="39" to="56" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b38">
	<analytic>
		<title level="a" type="main">Formally Specifying and Mechanically Verifying Programs for the Motorola Complex Arithmetic Processor DSP</title>
		<author>
			<persName><forename type="first">B</forename><surname>Brock</surname></persName>
		</author>
		<author>
			<persName><forename type="first">W</forename><forename type="middle">A</forename><surname>Hunt</surname><genName>Jr</genName></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Proceedings of the 1997 International Conference on Computer Design: VLSI in Computers &amp; Processors (ICCD 1997)</title>
				<meeting>the 1997 International Conference on Computer Design: VLSI in Computers &amp; Processors (ICCD 1997)<address><addrLine>Austin, TX</addrLine></address></meeting>
		<imprint>
			<publisher>IEEE Computer Society Press</publisher>
			<date type="published" when="1997">1997</date>
			<biblScope unit="page" from="31" to="36" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b39">
	<analytic>
		<title level="a" type="main">Metafunctions: Proving them Correct and Using Them Efficiently as New Proof Procedure</title>
		<author>
			<persName><forename type="first">R</forename><forename type="middle">S</forename><surname>Boyer</surname></persName>
		</author>
		<author>
			<persName><forename type="first">J</forename><forename type="middle">S</forename><surname>Moore</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">The Correctness Problem in Computer Science</title>
				<editor>
			<persName><forename type="first">R</forename><forename type="middle">S</forename><surname>Boyer</surname></persName>
		</editor>
		<editor>
			<persName><forename type="first">J</forename><forename type="middle">S</forename><surname>Moore</surname></persName>
		</editor>
		<meeting><address><addrLine>London, UK</addrLine></address></meeting>
		<imprint>
			<publisher>Academic Press</publisher>
			<date type="published" when="1981">1981</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b40">
	<analytic>
		<title level="a" type="main">The PROSPER toolkit</title>
		<author>
			<persName><forename type="first">L</forename><forename type="middle">A</forename><surname>Dennis</surname></persName>
		</author>
		<author>
			<persName><forename type="first">G</forename><surname>Collins</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Norrish</surname></persName>
		</author>
		<author>
			<persName><forename type="first">R</forename><surname>Boulton</surname></persName>
		</author>
		<author>
			<persName><forename type="first">K</forename><surname>Slind</surname></persName>
		</author>
		<author>
			<persName><forename type="first">G</forename><surname>Robinson</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Gordon</surname></persName>
		</author>
		<author>
			<persName><forename type="first">T</forename><forename type="middle">F</forename><surname>Melham</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Proceedings of the 6th International Conference on Tools and Algorithms for Constructing Systems (TACAS 2000)</title>
				<editor>
			<persName><forename type="first">S</forename><surname>Graf</surname></persName>
		</editor>
		<editor>
			<persName><forename type="first">M</forename><surname>Schwartbach</surname></persName>
		</editor>
		<meeting>the 6th International Conference on Tools and Algorithms for Constructing Systems (TACAS 2000)<address><addrLine>Berlin, Germany</addrLine></address></meeting>
		<imprint>
			<publisher>Springer-Verlag</publisher>
			<date type="published" when="2000">2000</date>
			<biblScope unit="volume">1785</biblScope>
			<biblScope unit="page" from="78" to="92" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b41">
	<analytic>
		<title level="a" type="main">Formal Verification of Microprocessors at AMD</title>
		<author>
			<persName><forename type="first">A</forename><surname>Flatau</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Kaufmann</surname></persName>
		</author>
		<author>
			<persName><forename type="first">D</forename><forename type="middle">F</forename><surname>Reed</surname></persName>
		</author>
		<author>
			<persName><forename type="first">D</forename><surname>Russinoff</surname></persName>
		</author>
		<author>
			<persName><forename type="first">E</forename><forename type="middle">W</forename><surname>Smith</surname></persName>
		</author>
		<author>
			<persName><forename type="first">R</forename><surname>Sumners</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">4th International Workshop on Designing Correct Circuits (DCC 2002)</title>
				<editor>
			<persName><forename type="first">M</forename><surname>Sheeran</surname></persName>
		</editor>
		<editor>
			<persName><forename type="first">T</forename><forename type="middle">F</forename><surname>Melham</surname></persName>
		</editor>
		<meeting><address><addrLine>Grenoble, France</addrLine></address></meeting>
		<imprint>
			<date type="published" when="2002-04">April 2002</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b42">
	<analytic>
		<title level="a" type="main">An embedding of the ACL2 logic in HOL</title>
		<author>
			<persName><forename type="first">M</forename><forename type="middle">J C</forename><surname>Gordon</surname></persName>
		</author>
		<author>
			<persName><forename type="first">W</forename><forename type="middle">A</forename><surname>Hunt</surname><genName>Jr</genName></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Kaufmann</surname></persName>
		</author>
		<author>
			<persName><forename type="first">J</forename><surname>Reynolds</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">6th International Workshop on the ACL2 Theorem Prover and Its Applications (ACL2 2006)</title>
				<editor>
			<persName><forename type="first">P</forename><surname>Manolios</surname></persName>
		</editor>
		<editor>
			<persName><forename type="first">M</forename><surname>Wilding</surname></persName>
		</editor>
		<meeting><address><addrLine>Seattle, WA</addrLine></address></meeting>
		<imprint>
			<date type="published" when="2006-08">August 2006</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b43">
	<analytic>
		<title level="a" type="main">An integration of HOL and ACL2</title>
		<author>
			<persName><forename type="first">M</forename><forename type="middle">J C</forename><surname>Gordon</surname></persName>
		</author>
		<author>
			<persName><forename type="first">W</forename><forename type="middle">A</forename><surname>Hunt</surname><genName>Jr</genName></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Kaufmann</surname></persName>
		</author>
		<author>
			<persName><forename type="first">J</forename><surname>Reynolds</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Proceedings of the 6th International Conference on Formal Methods in Computer-Aided Design (FMCAD 2006)</title>
				<editor>
			<persName><forename type="first">A</forename><surname>Gupta</surname></persName>
		</editor>
		<editor>
			<persName><forename type="first">P</forename><surname>Manolios</surname></persName>
		</editor>
		<meeting>the 6th International Conference on Formal Methods in Computer-Aided Design (FMCAD 2006)<address><addrLine>San Jose, CA</addrLine></address></meeting>
		<imprint>
			<publisher>Springer-Verlag</publisher>
			<date type="published" when="2006-11">November 2006</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b44">
	<monogr>
		<title level="m">Introduction to HOL: A Theorem-Proving Environment for Higher-Order Logic</title>
				<editor>
			<persName><forename type="first">M</forename><forename type="middle">J C</forename><surname>Gordon</surname></persName>
		</editor>
		<editor>
			<persName><forename type="first">T</forename><forename type="middle">F</forename><surname>Melham</surname></persName>
		</editor>
		<imprint>
			<publisher>Cambridge University Press</publisher>
			<date type="published" when="1993">1993</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b45">
	<analytic>
		<title level="a" type="main">Programming combinations of deduction and BDDbased symbolic calculation</title>
		<author>
			<persName><forename type="first">M</forename><forename type="middle">J C</forename><surname>Gordon</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">LMS Journal of Computation and Mathematics</title>
		<imprint>
			<biblScope unit="volume">5</biblScope>
			<biblScope unit="page" from="56" to="76" />
			<date type="published" when="2002">2002</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b46">
	<analytic>
		<title level="a" type="main">A Summary of Intrinsic Partitioning Verification</title>
		<author>
			<persName><forename type="first">D</forename><forename type="middle">A</forename><surname>Greve</surname></persName>
		</author>
		<author>
			<persName><forename type="first">R</forename><surname>Richards</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Wilding</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">5th International Workshop on the ACL2 Theorem Prover and Its Applications (ACL2 2004)</title>
				<editor>
			<persName><forename type="first">M</forename><surname>Kaufmann</surname></persName>
		</editor>
		<editor>
			<persName><forename type="first">J</forename><forename type="middle">S</forename><surname>Moore</surname></persName>
		</editor>
		<meeting><address><addrLine>Austin, TX</addrLine></address></meeting>
		<imprint>
			<date type="published" when="2004-11">November 2004</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b47">
	<analytic>
		<title level="a" type="main">Adding External Decision Procedures to HOL90 Securely</title>
		<author>
			<persName><forename type="first">E</forename><forename type="middle">L</forename><surname>Gunter</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">Lecture Notes in Computer Science</title>
		<imprint>
			<biblScope unit="volume">1479</biblScope>
			<biblScope unit="page" from="143" to="152" />
			<date type="published" when="1998">1998</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b48">
	<monogr>
		<title level="m" type="main">A Proposed Interface Logic for Verification Environments</title>
		<author>
			<persName><forename type="first">J</forename><forename type="middle">D</forename><surname>Guttman</surname></persName>
		</author>
		<idno>M-91-19</idno>
		<imprint>
			<date type="published" when="1991-03">March 1991</date>
		</imprint>
		<respStmt>
			<orgName>The Mitre Corporation</orgName>
		</respStmt>
	</monogr>
	<note type="report_type">Technical Report</note>
</biblStruct>

<biblStruct xml:id="b49">
	<analytic>
		<title level="a" type="main">An LCF-Style Interface between HOL and First-Order Logic</title>
		<author>
			<persName><forename type="first">J</forename><surname>Hurd</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Proceedings of the 18th International Conference on Automated Deduction (CADE 2002)</title>
				<editor>
			<persName><forename type="first">A</forename><surname>Voronkov</surname></persName>
		</editor>
		<meeting>the 18th International Conference on Automated Deduction (CADE 2002)</meeting>
		<imprint>
			<publisher>Springer-Verlag</publisher>
			<date type="published" when="2002">2002</date>
			<biblScope unit="volume">2392</biblScope>
			<biblScope unit="page" from="134" to="138" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b50">
	<monogr>
		<title level="m" type="main">Should We Begin a Standardization Process for Interface Logics?</title>
		<author>
			<persName><forename type="first">M</forename><surname>Kaufmann</surname></persName>
		</author>
		<author>
			<persName><forename type="first">J</forename><forename type="middle">S</forename><surname>Moore</surname></persName>
		</author>
		<imprint>
			<date type="published" when="1992-01">January 1992</date>
			<publisher>CLI</publisher>
			<biblScope unit="volume">72</biblScope>
		</imprint>
	</monogr>
	<note type="report_type">Technical Report</note>
</biblStruct>

<biblStruct xml:id="b51">
	<analytic>
		<title level="a" type="main">Design Goals of ACL2</title>
		<author>
			<persName><forename type="first">M</forename><surname>Kaufmann</surname></persName>
		</author>
		<author>
			<persName><forename type="first">J</forename><forename type="middle">S</forename><surname>Moore</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Computational Logic Incorporated (CLI), 1717 West Sixth</title>
				<meeting><address><addrLine>Street, Suite 290; Austin, TX</addrLine></address></meeting>
		<imprint>
			<date type="published" when="1994">1994</date>
			<biblScope unit="page">78703</biblScope>
		</imprint>
	</monogr>
	<note type="report_type">Technical Report 101</note>
</biblStruct>

<biblStruct xml:id="b52">
	<monogr>
		<title level="m" type="main">A Precise Description of the ACL2 Logic</title>
		<author>
			<persName><forename type="first">M</forename><surname>Kaufmann</surname></persName>
		</author>
		<author>
			<persName><forename type="first">J</forename><forename type="middle">S</forename><surname>Moore</surname></persName>
		</author>
		<ptr target="http://www.cs.utexas.edu/users/moore/publications/km97.ps.gz" />
		<imprint>
			<date type="published" when="1997">1997</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b53">
	<analytic>
		<title level="a" type="main">Structured Theory Development for a Mechanized Logic</title>
		<author>
			<persName><forename type="first">M</forename><surname>Kaufmann</surname></persName>
		</author>
		<author>
			<persName><forename type="first">J</forename><forename type="middle">S</forename><surname>Moore</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">Journal of Automated Reasoning</title>
		<imprint>
			<biblScope unit="volume">26</biblScope>
			<biblScope unit="issue">2</biblScope>
			<biblScope unit="page" from="161" to="203" />
			<date type="published" when="2001">2001</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b54">
	<monogr>
		<title level="m" type="main">ACL2 home page</title>
		<author>
			<persName><forename type="first">M</forename><surname>Kaufmann</surname></persName>
		</author>
		<author>
			<persName><forename type="first">J</forename><forename type="middle">S</forename><surname>Moore</surname></persName>
		</author>
		<ptr target="http://www.cs.utexas.edu/users/moore/acl2" />
		<imprint>
			<date type="published" when="2006">2006</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b55">
	<monogr>
		<title level="m" type="main">Computer-Aided Reasoning: An Approach</title>
		<author>
			<persName><forename type="first">M</forename><surname>Kaufmann</surname></persName>
		</author>
		<author>
			<persName><forename type="first">P</forename><surname>Manolios</surname></persName>
		</author>
		<author>
			<persName><forename type="first">J</forename><forename type="middle">S</forename><surname>Moore</surname></persName>
		</author>
		<imprint>
			<date type="published" when="2000-06">June 2000</date>
			<publisher>Kluwer Academic Publishers</publisher>
			<pubPlace>Boston, MA</pubPlace>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b56">
	<analytic>
		<title level="a" type="main">Scalable Automated Verification via Expert-System Guided Transformations</title>
		<author>
			<persName><forename type="first">H</forename><surname>Mony</surname></persName>
		</author>
		<author>
			<persName><forename type="first">J</forename><surname>Baumgartner</surname></persName>
		</author>
		<author>
			<persName><forename type="first">V</forename><surname>Paruthi</surname></persName>
		</author>
		<author>
			<persName><forename type="first">R</forename><surname>Kanzelman</surname></persName>
		</author>
		<author>
			<persName><forename type="first">A</forename><surname>Kuehlmann</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Proceedings of the 5th International Conference on Formal Methods in Computer-Aided Design (FMCAD 2004)</title>
				<editor>
			<persName><forename type="first">A</forename><forename type="middle">J</forename><surname>Hu</surname></persName>
		</editor>
		<editor>
			<persName><forename type="first">A</forename><forename type="middle">K</forename><surname>Martin</surname></persName>
		</editor>
		<meeting>the 5th International Conference on Formal Methods in Computer-Aided Design (FMCAD 2004)</meeting>
		<imprint>
			<publisher>Springer-Verlag</publisher>
			<date type="published" when="2004">2004</date>
			<biblScope unit="volume">3312</biblScope>
			<biblScope unit="page" from="217" to="233" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b57">
	<analytic>
		<title level="a" type="main">A Mechanically Checked Proof of the Kernel of the AMD5K86 Floating-point Division Algorithm</title>
		<author>
			<persName><forename type="first">J</forename><forename type="middle">S</forename><surname>Moore</surname></persName>
		</author>
		<author>
			<persName><forename type="first">T</forename><surname>Lynch</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Kaufmann</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">IEEE Transactions on Computers</title>
		<imprint>
			<biblScope unit="volume">47</biblScope>
			<biblScope unit="issue">9</biblScope>
			<biblScope unit="page" from="913" to="926" />
			<date type="published" when="1998-09">September 1998</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b58">
	<analytic>
		<title level="a" type="main">Combining Model Checking and Deduction for I/O-Automata</title>
		<author>
			<persName><forename type="first">O</forename><surname>Müller</surname></persName>
		</author>
		<author>
			<persName><forename type="first">T</forename><surname>Nipkow</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Proceedings of the 1st International Workshop on Tools and Algorithms for the Construction and Analysis of Systems (TACAS 1995)</title>
				<editor>
			<persName><forename type="first">E</forename><surname>Brinksma</surname></persName>
		</editor>
		<meeting>the 1st International Workshop on Tools and Algorithms for the Construction and Analysis of Systems (TACAS 1995)<address><addrLine>Aarhus, Denmark</addrLine></address></meeting>
		<imprint>
			<publisher>Springer-Verlag</publisher>
			<date type="published" when="1995-05">May 1995</date>
			<biblScope unit="volume">1019</biblScope>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b59">
	<analytic>
		<title level="a" type="main">Experiments on Supporting Interactive Proof Using Resolution</title>
		<author>
			<persName><forename type="first">J</forename><surname>Meng</surname></persName>
		</author>
		<author>
			<persName><forename type="first">L</forename><forename type="middle">C</forename><surname>Paulson</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Proceedings of the 2nd International Joint Conference on Computer-Aided Reasoning (IJCAR 2004)</title>
				<editor>
			<persName><forename type="first">D</forename><forename type="middle">A</forename><surname>Basin</surname></persName>
		</editor>
		<editor>
			<persName><forename type="first">M</forename><surname>Rusinowitch</surname></persName>
		</editor>
		<meeting>the 2nd International Joint Conference on Computer-Aided Reasoning (IJCAR 2004)</meeting>
		<imprint>
			<date type="published" when="2004">2004</date>
			<biblScope unit="volume">3097</biblScope>
			<biblScope unit="page" from="372" to="384" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b60">
	<analytic>
		<title level="a" type="main">Ivy: A Preprocessor and Proof Checker for First-Order Logic</title>
		<author>
			<persName><forename type="first">W</forename><surname>McCune</surname></persName>
		</author>
		<author>
			<persName><forename type="first">O</forename><surname>Shumsky</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Computer-Aided Reasoning: ACL2 Case Studies</title>
				<editor>
			<persName><forename type="first">P</forename><surname>Manolios</surname></persName>
		</editor>
		<editor>
			<persName><forename type="first">M</forename><surname>Kaufmann</surname></persName>
		</editor>
		<editor>
			<persName><forename type="first">J</forename><forename type="middle">S</forename><surname>Moore</surname></persName>
		</editor>
		<meeting><address><addrLine>Boston, MA</addrLine></address></meeting>
		<imprint>
			<publisher>Kluwer Academic Publishers</publisher>
			<date type="published" when="2000-06">June 2000</date>
			<biblScope unit="page" from="217" to="230" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b61">
	<analytic>
		<title level="a" type="main">Automatic Verification of Safety and Liveness of XScale-Like Processor Models Using WEB Refinements</title>
		<author>
			<persName><forename type="first">P</forename><surname>Manolios</surname></persName>
		</author>
		<author>
			<persName><forename type="first">S</forename><surname>Srinivasan</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Design, Automation and Test in Europe</title>
				<meeting><address><addrLine>DATE; Paris, France</addrLine></address></meeting>
		<imprint>
			<publisher>IEEE Computer Society Press</publisher>
			<date type="published" when="2004">2004. 2004</date>
			<biblScope unit="page" from="168" to="175" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b62">
	<analytic>
		<title level="a" type="main">Refinement Maps for Efficient Verification of Processor Models</title>
		<author>
			<persName><forename type="first">P</forename><surname>Manolios</surname></persName>
		</author>
		<author>
			<persName><forename type="first">S</forename><surname>Srinivasan</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Design, Automation and Test in Europe (DATE 2005)</title>
				<meeting><address><addrLine>Munich, Germany</addrLine></address></meeting>
		<imprint>
			<publisher>IEEE Computer Society Press</publisher>
			<date type="published" when="2005">2005</date>
			<biblScope unit="page" from="1304" to="1309" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b63">
	<monogr>
		<title level="m" type="main">The Isabelle Reference Manual</title>
		<author>
			<persName><forename type="first">L</forename><surname>Paulson</surname></persName>
		</author>
		<ptr target="http://www.cl.cam.ac.uk/Research/HVG/Isabelle/dist/Isabelle2003/doc/ref.pdf" />
		<imprint/>
	</monogr>
</biblStruct>

<biblStruct xml:id="b64">
	<analytic>
		<title level="a" type="main">RTL Verification: A Floating Point Multiplier</title>
		<author>
			<persName><forename type="first">D</forename><surname>Russinoff</surname></persName>
		</author>
		<author>
			<persName><forename type="first">A</forename><surname>Flatau</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Computer-Aided Reasoning: ACL2 Case Studies</title>
				<editor>
			<persName><forename type="first">M</forename><surname>Kaufmann</surname></persName>
		</editor>
		<editor>
			<persName><forename type="first">P</forename><surname>Manolios</surname></persName>
		</editor>
		<editor>
			<persName><forename type="first">J</forename><forename type="middle">S</forename><surname>Moore</surname></persName>
		</editor>
		<meeting><address><addrLine>Boston, MA</addrLine></address></meeting>
		<imprint>
			<publisher>Kluwer Academic Publishers</publisher>
			<date type="published" when="2000-06">June 2000</date>
			<biblScope unit="page" from="201" to="232" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b65">
	<analytic>
		<title level="a" type="main">A SAT-Based Decision Procedure for the Subclass of Unrollable List Formulas in ACL2 (SULFA)</title>
		<author>
			<persName><forename type="first">E</forename><surname>Reeber</surname></persName>
		</author>
		<author>
			<persName><forename type="first">W</forename><forename type="middle">A</forename><surname>Hunt</surname><genName>Jr</genName></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Proceedings of the 3rd International Joint Conference on Computer-Aided Reasoning (IJCAR 2006)</title>
				<editor>
			<persName><forename type="first">U</forename><surname>Furbach</surname></persName>
		</editor>
		<editor>
			<persName><forename type="first">N</forename><surname>Shankar</surname></persName>
		</editor>
		<meeting>the 3rd International Joint Conference on Computer-Aided Reasoning (IJCAR 2006)</meeting>
		<imprint>
			<date type="published" when="2006">2006</date>
			<biblScope unit="volume">4130</biblScope>
			<biblScope unit="page" from="453" to="467" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b66">
	<analytic>
		<title level="a" type="main">Certifying Compositional Model Checking Algorithms in ACL2</title>
		<author>
			<persName><forename type="first">S</forename><surname>Ray</surname></persName>
		</author>
		<author>
			<persName><forename type="first">J</forename><surname>Matthews</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Tuttle</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">4th International Workshop on the ACL2 Theorem Prover and Its Applications (ACL2 2003)</title>
				<editor>
			<persName><forename type="first">W</forename><forename type="middle">A</forename><surname>Hunt</surname><genName>Jr</genName></persName>
		</editor>
		<editor>
			<persName><forename type="first">M</forename><surname>Kaufmann</surname></persName>
		</editor>
		<editor>
			<persName><forename type="first">J</forename><forename type="middle">S</forename><surname>Moore</surname></persName>
		</editor>
		<meeting><address><addrLine>Boulder, CO</addrLine></address></meeting>
		<imprint>
			<date type="published" when="2003-07">July 2003</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b67">
	<analytic>
		<title level="a" type="main">An Integration of Model-Checking with Automated Proof Checking</title>
		<author>
			<persName><forename type="first">S</forename><surname>Rajan</surname></persName>
		</author>
		<author>
			<persName><forename type="first">N</forename><surname>Shankar</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><forename type="middle">K</forename><surname>Srivas</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Proceedings of the 8th International Conference on Computer-Aided Verification (CAV &apos;95)</title>
				<editor>
			<persName><forename type="first">P</forename><surname>Wolper</surname></persName>
		</editor>
		<meeting>the 8th International Conference on Computer-Aided Verification (CAV &apos;95)</meeting>
		<imprint>
			<publisher>Springer-Verlag</publisher>
			<date type="published" when="1995">1995</date>
			<biblScope unit="volume">939</biblScope>
			<biblScope unit="page" from="84" to="97" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b68">
	<analytic>
		<title level="a" type="main">A Mechanically Checked Proof of IEEE Compliance of a Register-Transfer-Level Specification of the AMD-K7 Floating-point Multiplication, Division, and Square Root Instructions</title>
		<author>
			<persName><forename type="first">D</forename><surname>Russinoff</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">LMS Journal of Computation and Mathematics</title>
		<imprint>
			<biblScope unit="volume">1</biblScope>
			<biblScope unit="page" from="148" to="200" />
			<date type="published" when="1998-12">December 1998</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b69">
	<analytic>
		<title level="a" type="main">Trace Table Based Approach for Pipelined Microprocessor Verification</title>
		<author>
			<persName><forename type="first">J</forename><surname>Sawada</surname></persName>
		</author>
		<author>
			<persName><forename type="first">W</forename><forename type="middle">A</forename><surname>Hunt</surname><genName>Jr</genName></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Proceedings of the 9th International Conference on Computer-Aided Verification (CAV 1997)</title>
				<editor>
			<persName><forename type="first">O</forename><surname>Grumberg</surname></persName>
		</editor>
		<meeting>the 9th International Conference on Computer-Aided Verification (CAV 1997)</meeting>
		<imprint>
			<publisher>Springer-Verlag</publisher>
			<date type="published" when="1997">1997</date>
			<biblScope unit="volume">1254</biblScope>
			<biblScope unit="page" from="364" to="375" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b70">
	<analytic>
		<title level="a" type="main">Using Decision Procedures with Higher Order Logics</title>
		<author>
			<persName><forename type="first">N</forename><surname>Shankar</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Proceedings of the 14th International Conference on Theorem Proving in Higher-Order Logics (TPHOLS 2001)</title>
				<editor>
			<persName><forename type="first">R</forename><forename type="middle">J</forename><surname>Boulton</surname></persName>
		</editor>
		<editor>
			<persName><forename type="first">P</forename><forename type="middle">B</forename><surname>Jackson</surname></persName>
		</editor>
		<meeting>the 14th International Conference on Theorem Proving in Higher-Order Logics (TPHOLS 2001)</meeting>
		<imprint>
			<publisher>Springer-Verlag</publisher>
			<date type="published" when="2001">2001</date>
			<biblScope unit="volume">2152</biblScope>
			<biblScope unit="page" from="5" to="26" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b71">
	<analytic>
		<title level="a" type="main">ACL2SIX: A hint used to integrate a theorem prover and an automated verification tool</title>
		<author>
			<persName><forename type="first">J</forename><surname>Sawada</surname></persName>
		</author>
		<author>
			<persName><forename type="first">E</forename><surname>Reeber</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Proceedings of the 6th International Conference on Formal Methods in Computer-Aided Design (FMCAD 2006)</title>
				<editor>
			<persName><forename type="first">A</forename><surname>Gupta</surname></persName>
		</editor>
		<editor>
			<persName><forename type="first">P</forename><surname>Manolios</surname></persName>
		</editor>
		<meeting>the 6th International Conference on Formal Methods in Computer-Aided Design (FMCAD 2006)<address><addrLine>San Jose, CA</addrLine></address></meeting>
		<imprint>
			<publisher>Springer-Verlag</publisher>
			<date type="published" when="2006-11">November 2006</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b72">
	<analytic>
		<title level="a" type="main">A SAT based approach for solving formulas over Boolean and linear mathematical propositions</title>
		<author>
			<persName><forename type="first">G</forename><surname>Audemard</surname></persName>
		</author>
		<author>
			<persName><forename type="first">P</forename><surname>Bertoli</surname></persName>
		</author>
		<author>
			<persName><forename type="first">A</forename><surname>Cimatti</surname></persName>
		</author>
		<author>
			<persName><forename type="first">A</forename><surname>Kornilowicz</surname></persName>
		</author>
		<author>
			<persName><forename type="first">R</forename><surname>Sebastiani</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Proceedings of the 18th International Conference on Automated Deduction (CADE-18)</title>
		<title level="s">Lecture Notes in Artificial Intelligence</title>
		<editor>
			<persName><forename type="first">Andrei</forename><surname>Voronkov</surname></persName>
		</editor>
		<meeting>the 18th International Conference on Automated Deduction (CADE-18)<address><addrLine>Copenhagen, Denmark</addrLine></address></meeting>
		<imprint>
			<publisher>Springer</publisher>
			<date type="published" when="2002-07">July 2002</date>
			<biblScope unit="volume">2392</biblScope>
			<biblScope unit="page" from="195" to="210" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b73">
	<analytic>
		<title level="a" type="main">Compressing propositional refutations</title>
		<author>
			<persName><forename type="first">Hasan</forename><surname>Amjad</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Sixth International Workshop on Automated Verification of Critical Systems (AVOCS &apos;06) -Preliminary Proceedings</title>
				<editor>
			<persName><forename type="first">Stephan</forename><surname>Merz</surname></persName>
		</editor>
		<editor>
			<persName><forename type="first">Tobias</forename><surname>Nipkow</surname></persName>
		</editor>
		<imprint>
			<date type="published" when="2006">2006</date>
			<biblScope unit="page" from="7" to="18" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b74">
	<analytic>
		<title level="a" type="main">A HOL/MiniSat interface</title>
		<author>
			<persName><forename type="first">Hasan</forename><surname>Amjad</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Personal communication</title>
				<imprint>
			<date type="published" when="2006">2006</date>
		</imprint>
	</monogr>
	<note>September</note>
</biblStruct>

<biblStruct xml:id="b75">
	<analytic>
		<title level="a" type="main">CVC Lite: A new implementation of the cooperating validity checker</title>
		<author>
			<persName><forename type="first">Clark</forename><surname>Barrett</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Sergey</forename><surname>Berezin</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Proceedings of the 16th International Conference on Computer Aided Verification (CAV 2004)</title>
				<meeting>the 16th International Conference on Computer Aided Verification (CAV 2004)<address><addrLine>Boston, Massachusetts, USA</addrLine></address></meeting>
		<imprint>
			<date type="published" when="2004-07">July 2004</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b76">
	<analytic>
		<title level="a" type="main">A proof-producing Boolean search engine</title>
		<author>
			<persName><forename type="first">Clark</forename><surname>Barrett</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Sergey</forename><surname>Berezin</surname></persName>
		</author>
		<author>
			<persName><forename type="first">David</forename><forename type="middle">L</forename><surname>Dill</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Proceedings of the Workshop on Pragmatics of Decision Procedures in Automated Reasoning (PDPAR 2003)</title>
				<meeting>the Workshop on Pragmatics of Decision Procedures in Automated Reasoning (PDPAR 2003)<address><addrLine>Miami, Florida, USA</addrLine></address></meeting>
		<imprint>
			<date type="published" when="2003-07">July 2003</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b77">
	<monogr>
		<ptr target="ftp://dimacs.rutgers.edu/pub/challenge/satisfiability/doc" />
		<title level="m">DIMACS satisfiability suggested format</title>
				<imprint>
			<date type="published" when="1993">1993</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b78">
	<analytic>
		<title level="a" type="main">Geometric resolution: A proof procedure based on finite model search</title>
		<author>
			<persName><forename type="first">Hans</forename><surname>De Nivelle</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Jia</forename><surname>Meng</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Automated Reasoning -Third International Joint Conference, IJCAR 2006</title>
		<title level="s">Lecture Notes in Artificial Intelligence</title>
		<editor>
			<persName><forename type="first">Ulrich</forename><surname>Furbach</surname></persName>
		</editor>
		<editor>
			<persName><forename type="first">Natarajan</forename><surname>Shankar</surname></persName>
		</editor>
		<meeting><address><addrLine>Seattle, WA, USA</addrLine></address></meeting>
		<imprint>
			<date type="published" when="2006-08">August 2006. 2006</date>
			<biblScope unit="volume">4130</biblScope>
			<biblScope unit="page" from="303" to="317" />
		</imprint>
	</monogr>
	<note>Proceedings</note>
</biblStruct>

<biblStruct xml:id="b79">
	<analytic>
		<title level="a" type="main">An extensible SAT-solver</title>
		<author>
			<persName><forename type="first">Niklas</forename><surname>Eén</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Niklas</forename><surname>Sörensson</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Theory and Applications of Satisfiability Testing, 6th International Conference, SAT 2003</title>
		<title level="s">Lecture Notes in Computer Science</title>
		<editor>
			<persName><forename type="first">Enrico</forename><surname>Giunchiglia</surname></persName>
		</editor>
		<editor>
			<persName><forename type="first">Armando</forename><surname>Tacchella</surname></persName>
		</editor>
		<meeting><address><addrLine>Santa Margherita Ligure, Italy</addrLine></address></meeting>
		<imprint>
			<publisher>Springer</publisher>
			<date type="published" when="2004">May 5-8, 2003. 2004</date>
			<biblScope unit="volume">2919</biblScope>
			<biblScope unit="page" from="502" to="518" />
		</imprint>
	</monogr>
	<note>Selected Revised Papers</note>
</biblStruct>

<biblStruct xml:id="b80">
	<monogr>
		<title level="m" type="main">MiniSat-p-v1.14 -A proof-logging version of MiniSat</title>
		<author>
			<persName><forename type="first">Niklas</forename><surname>Eén</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Niklas</forename><surname>Sörensson</surname></persName>
		</author>
		<ptr target="http://www.cs.chalmers.se/Cs/Research/FormalMethods/MiniSat/" />
		<imprint>
			<date type="published" when="2006-09">September 2006</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b81">
	<analytic>
		<title level="a" type="main">Expressiveness + automation + soundness: Towards combining SMT solvers and interactive proof assistants</title>
		<author>
			<persName><forename type="first">Pascal</forename><surname>Fontaine</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Jean-Yves</forename><surname>Marion</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Stephan</forename><surname>Merz</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Leonor</forename><forename type="middle">Prensa</forename><surname>Nieto</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Alwen</forename><surname>Tiu</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Tools and Algorithms for the Construction and Analysis of Systems, 12th International Conference, TACAS 2006 Held as Part of the Joint European Conferences on Theory and Practice of Software, ETAPS 2006</title>
		<title level="s">Lecture Notes in Computer Science</title>
		<editor>
			<persName><forename type="first">Holger</forename><surname>Hermanns</surname></persName>
		</editor>
		<editor>
			<persName><forename type="first">Jens</forename><surname>Palsberg</surname></persName>
		</editor>
		<meeting><address><addrLine>Vienna, Austria</addrLine></address></meeting>
		<imprint>
			<publisher>Springer</publisher>
			<date type="published" when="2006-04-02">March 25 -April 2, 2006. 2006</date>
			<biblScope unit="volume">3920</biblScope>
			<biblScope unit="page" from="167" to="181" />
		</imprint>
	</monogr>
	<note>Proceedings</note>
</biblStruct>

<biblStruct xml:id="b82">
	<monogr>
		<title level="m" type="main">Introduction to HOL: A theorem proving environment for higher order logic</title>
		<author>
			<persName><forename type="first">M</forename><forename type="middle">J C</forename><surname>Gordon</surname></persName>
		</author>
		<author>
			<persName><forename type="first">T</forename><forename type="middle">F</forename><surname>Melham</surname></persName>
		</author>
		<imprint>
			<date type="published" when="1993">1993</date>
			<publisher>Cambridge University Press</publisher>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b83">
	<analytic>
		<title level="a" type="main">From LCF to HOL: A short history</title>
		<author>
			<persName><forename type="first">M</forename><forename type="middle">J C</forename><surname>Gordon</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Proof, Language, and Interaction</title>
				<editor>
			<persName><forename type="first">G</forename><surname>Plotkin</surname></persName>
		</editor>
		<editor>
			<persName><forename type="first">Colin</forename><forename type="middle">P</forename><surname>Stirling</surname></persName>
		</editor>
		<editor>
			<persName><forename type="first">Mads</forename><surname>Tofte</surname></persName>
		</editor>
		<imprint>
			<publisher>MIT Press</publisher>
			<date type="published" when="2000">2000</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b84">
	<monogr>
		<title level="m" type="main">HolSatLib documentation, version 1.0b</title>
		<author>
			<persName><forename type="first">M</forename><forename type="middle">J C</forename><surname>Gordon</surname></persName>
		</author>
		<ptr target="http://www.cl.cam.ac.uk/~mjcg/HolSatLib/HolSatLib.html" />
		<imprint>
			<date type="published" when="2001-06">June 2001</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b85">
	<analytic>
		<title level="a" type="main">SATLIB: An online resource for research on SAT</title>
		<author>
			<persName><forename type="first">Holger</forename><forename type="middle">H</forename><surname>Hoos</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Thomas</forename><surname>Stützle</surname></persName>
		</author>
		<ptr target="http://www.satlib.org/" />
	</analytic>
	<monogr>
		<title level="m">itors</title>
				<editor>
			<persName><forename type="first">Ian</forename><surname>Gent</surname></persName>
		</editor>
		<editor>
			<persName><forename type="first">Hans</forename><surname>Van Maaren</surname></persName>
		</editor>
		<editor>
			<persName><forename type="first">Toby</forename><surname>Walsh</surname></persName>
		</editor>
		<meeting><address><addrLine>SAT</addrLine></address></meeting>
		<imprint>
			<publisher>IOS Press</publisher>
			<date type="published" when="2000">2000. 2000</date>
			<biblScope unit="page" from="283" to="292" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b86">
	<analytic>
		<title level="a" type="main">Integrating Gandalf and HOL</title>
		<author>
			<persName><forename type="first">Joe</forename><surname>Hurd</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Theorem Proving in Higher Order Logics, 12th International Conference</title>
		<title level="s">Lecture Notes in Computer Science</title>
		<editor>
			<persName><forename type="first">Yves</forename><surname>Bertot</surname></persName>
		</editor>
		<editor>
			<persName><forename type="first">Gilles</forename><surname>Dowek</surname></persName>
		</editor>
		<editor>
			<persName><forename type="first">André</forename><surname>Hirschowitz</surname></persName>
		</editor>
		<editor>
			<persName><forename type="first">Christine</forename><surname>Paulin</surname></persName>
		</editor>
		<editor>
			<persName><forename type="first">Laurent</forename><surname>Théry</surname></persName>
		</editor>
		<meeting><address><addrLine>Nice, France</addrLine></address></meeting>
		<imprint>
			<publisher>Springer</publisher>
			<date type="published" when="1999-09">September 1999</date>
			<biblScope unit="volume">1690</biblScope>
			<biblScope unit="page" from="311" to="321" />
		</imprint>
	</monogr>
	<note>TPHOLs &apos;99</note>
</biblStruct>

<biblStruct xml:id="b87">
	<analytic>
		<title level="a" type="main">An LCF-style interface between HOL and first-order logic</title>
		<author>
			<persName><forename type="first">Joe</forename><surname>Hurd</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Proceedings of the 18th International Conference on Automated Deduction (CADE-18)</title>
		<title level="s">Lecture Notes in Artificial Intelligence</title>
		<editor>
			<persName><forename type="first">Andrei</forename><surname>Voronkov</surname></persName>
		</editor>
		<meeting>the 18th International Conference on Automated Deduction (CADE-18)<address><addrLine>Copenhagen, Denmark</addrLine></address></meeting>
		<imprint>
			<publisher>Springer</publisher>
			<date type="published" when="2002-07">July 2002</date>
			<biblScope unit="volume">2392</biblScope>
			<biblScope unit="page" from="134" to="138" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b88">
	<analytic>
		<title level="a" type="main">Proof reconstruction for first-order logic and set-theoretical constructions</title>
		<author>
			<persName><forename type="first">Clément</forename><surname>Hurlin</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Sixth International Workshop on Automated Verification of Critical Systems (AVOCS &apos;06) -Preliminary Proceedings</title>
				<editor>
			<persName><forename type="first">Stephan</forename><surname>Merz</surname></persName>
		</editor>
		<editor>
			<persName><forename type="first">Tobias</forename><surname>Nipkow</surname></persName>
		</editor>
		<imprint>
			<date type="published" when="2006">2006</date>
			<biblScope unit="page" from="157" to="162" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b89">
	<analytic>
		<title level="a" type="main">Integrating a first-order automatic prover in the HOL environment</title>
		<author>
			<persName><forename type="first">R</forename><surname>Kumar</surname></persName>
		</author>
		<author>
			<persName><forename type="first">T</forename><surname>Kropf</surname></persName>
		</author>
		<author>
			<persName><forename type="first">K</forename><surname>Schneider</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Proceedings of the 1991 International Workshop on the HOL Theorem Proving System and its Applications</title>
				<editor>
			<persName><forename type="first">M</forename><surname>Archer</surname></persName>
		</editor>
		<editor>
			<persName><forename type="first">J</forename><forename type="middle">J</forename><surname>Joyce</surname></persName>
		</editor>
		<editor>
			<persName><forename type="first">K</forename><forename type="middle">N</forename><surname>Levitt</surname></persName>
		</editor>
		<editor>
			<persName><forename type="first">P</forename><forename type="middle">J</forename><surname>Windley</surname></persName>
		</editor>
		<meeting>the 1991 International Workshop on the HOL Theorem Proving System and its Applications<address><addrLine>Davis, California, USA</addrLine></address></meeting>
		<imprint>
			<publisher>IEEE Computer Society Press</publisher>
			<date type="published" when="1991-08">August 1991. 1992</date>
			<biblScope unit="page" from="170" to="176" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b90">
	<monogr>
		<title level="m" type="main">ASCII proof traces for MiniSat</title>
		<author>
			<persName><forename type="first">John</forename><surname>Matthews</surname></persName>
		</author>
		<imprint>
			<date type="published" when="2006-08">August 2006</date>
		</imprint>
	</monogr>
	<note>Personal communication</note>
</biblStruct>

<biblStruct xml:id="b91">
	<analytic>
		<title level="a" type="main">TRAMP: Transformation of machine-found proofs into natural deduction proofs at the assertion level</title>
		<author>
			<persName><forename type="first">Andreas</forename><surname>Meier</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Automated Deduction -CADE-17, 17th International Conference on Automated Deduction</title>
		<title level="s">Lecture Notes in Artificial Intelligence</title>
		<editor>
			<persName><forename type="first">David</forename><surname>McAllester</surname></persName>
		</editor>
		<meeting><address><addrLine>Pittsburgh, PA, USA</addrLine></address></meeting>
		<imprint>
			<publisher>Springer</publisher>
			<date type="published" when="2000">June 17-20, 2000. 2000</date>
			<biblScope unit="volume">1831</biblScope>
			<biblScope unit="page" from="460" to="464" />
		</imprint>
	</monogr>
	<note>Proceedings</note>
</biblStruct>

<biblStruct xml:id="b92">
	<analytic>
		<title level="a" type="main">Integration of interactive and automatic provers</title>
		<author>
			<persName><forename type="first">Jia</forename><surname>Meng</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Second CologNet Workshop on Implementation Technology for Computational Logic Systems</title>
				<editor>
			<persName><forename type="first">Manuel</forename><surname>Carro</surname></persName>
		</editor>
		<editor>
			<persName><forename type="first">Jesus</forename><surname>Correas</surname></persName>
		</editor>
		<meeting><address><addrLine>FME</addrLine></address></meeting>
		<imprint>
			<date type="published" when="2003-09">2003. September 2003</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b93">
	<analytic>
		<title level="a" type="main">Chaff: Engineering an efficient SAT solver</title>
		<author>
			<persName><forename type="first">M</forename><surname>Moskewicz</surname></persName>
		</author>
		<author>
			<persName><forename type="first">C</forename><surname>Madigan</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Y</forename><surname>Zhao</surname></persName>
		</author>
		<author>
			<persName><forename type="first">L</forename><surname>Zhang</surname></persName>
		</author>
		<author>
			<persName><forename type="first">S</forename><surname>Malik</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Proceedings of the 38th Design Automation Conference</title>
				<meeting>the 38th Design Automation Conference<address><addrLine>Las Vegas</addrLine></address></meeting>
		<imprint>
			<date type="published" when="2001-06">June 2001</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b94">
	<analytic>
		<title level="a" type="main">Experiments on supporting interactive proof using resolution</title>
		<author>
			<persName><forename type="first">Jia</forename><surname>Meng</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Lawrence</forename><forename type="middle">C</forename><surname>Paulson</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Automated Reasoning: Second International Joint Conference, IJCAR 2004</title>
		<editor>
			<persName><forename type="first">David</forename><surname>Basin</surname></persName>
		</editor>
		<editor>
			<persName><forename type="first">Michaël</forename><surname>Rusinowitch</surname></persName>
		</editor>
		<title level="s">Lecture Notes in Artificial Intelligence</title>
		<meeting><address><addrLine>Cork, Ireland</addrLine></address></meeting>
		<imprint>
			<publisher>Springer</publisher>
			<date type="published" when="2004">July 4-8, 2004. 2004</date>
			<biblScope unit="volume">3097</biblScope>
			<biblScope unit="page" from="372" to="384" />
		</imprint>
	</monogr>
	<note>Proceedings</note>
</biblStruct>

<biblStruct xml:id="b95">
	<analytic>
		<title level="a" type="main">Translating higher-order problems to first-order clauses</title>
		<author>
			<persName><forename type="first">Jia</forename><surname>Meng</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Lawrence</forename><forename type="middle">C</forename><surname>Paulson</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">ESCoR: Empirically Successful Computerized Reasoning</title>
		<title level="s">CEUR Workshop Proceedings</title>
		<editor>
			<persName><forename type="first">Geoff</forename><surname>Sutcliffe</surname></persName>
		</editor>
		<editor>
			<persName><forename type="first">Renate</forename><surname>Schmidt</surname></persName>
		</editor>
		<editor>
			<persName><forename type="first">Stephan</forename><surname>Schulz</surname></persName>
		</editor>
		<imprint>
			<date type="published" when="2006">2006</date>
			<biblScope unit="volume">192</biblScope>
			<biblScope unit="page" from="70" to="80" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b96">
	<analytic>
		<title level="a" type="main">Applying SAT solving in classification of finite algebras</title>
		<author>
			<persName><forename type="first">Andreas</forename><surname>Meier</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Volker</forename><surname>Sorge</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">Journal of Automated Reasoning</title>
		<imprint>
			<biblScope unit="volume">35</biblScope>
			<biblScope unit="issue">1-3</biblScope>
			<biblScope unit="page" from="201" to="235" />
			<date type="published" when="2005-10">October 2005</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b97">
	<monogr>
		<title level="m" type="main">The Definition of Standard ML -Revised</title>
		<author>
			<persName><forename type="first">Robin</forename><surname>Milner</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Mads</forename><surname>Tofte</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Robert</forename><surname>Harper</surname></persName>
		</author>
		<author>
			<persName><forename type="first">David</forename><surname>Macqueen</surname></persName>
		</author>
		<imprint>
			<date type="published" when="1997-05">May 1997</date>
			<publisher>MIT Press</publisher>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b98">
	<analytic>
		<title level="a" type="main">Isabelle/HOL -A Proof Assistant for Higher-Order Logic</title>
		<author>
			<persName><forename type="first">Tobias</forename><surname>Nipkow</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Lawrence</forename><forename type="middle">C</forename><surname>Paulson</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Markus</forename><surname>Wenzel</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="s">Lecture Notes in Computer Science</title>
		<imprint>
			<biblScope unit="volume">2283</biblScope>
			<date type="published" when="2002">2002</date>
			<publisher>Springer</publisher>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b99">
	<analytic>
		<title level="a" type="main">On generating small clause normal forms</title>
		<author>
			<persName><forename type="first">Andreas</forename><surname>Nonnengart</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Georg</forename><surname>Rock</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Christoph</forename><surname>Weidenbach</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Automated Deduction -CADE-15, 15th International Conference on Automated Deduction</title>
		<title level="s">Lecture Notes in Computer Science</title>
		<editor>
			<persName><forename type="first">Claude</forename><surname>Kirchner</surname></persName>
		</editor>
		<editor>
			<persName><forename type="first">Hélène</forename><surname>Kirchner</surname></persName>
		</editor>
		<meeting><address><addrLine>Lindau, Germany</addrLine></address></meeting>
		<imprint>
			<publisher>Springer</publisher>
			<date type="published" when="1998">July 5-10, 1998. 1998</date>
			<biblScope unit="volume">1421</biblScope>
			<biblScope unit="page" from="397" to="411" />
		</imprint>
	</monogr>
	<note>Proceedings</note>
</biblStruct>

<biblStruct xml:id="b100">
	<analytic>
		<title level="a" type="main">PVS: A prototype verification system</title>
		<author>
			<persName><forename type="first">S</forename><surname>Owre</surname></persName>
		</author>
		<author>
			<persName><forename type="first">J</forename><forename type="middle">M</forename><surname>Rushby</surname></persName>
		</author>
		<author>
			<persName><forename type="first">N</forename><surname>Shankar</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">11th International Conference on Automated Deduction (CADE)</title>
		<title level="s">Lecture Notes in Artificial Intelligence</title>
		<editor>
			<persName><forename type="first">Deepak</forename><surname>Kapur</surname></persName>
		</editor>
		<meeting><address><addrLine>Saratoga, NY</addrLine></address></meeting>
		<imprint>
			<publisher>Springer</publisher>
			<date type="published" when="1992-06">June 1992</date>
			<biblScope unit="volume">607</biblScope>
			<biblScope unit="page" from="748" to="752" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b101">
	<analytic>
		<title level="a" type="main">Isabelle: A Generic Theorem Prover</title>
		<author>
			<persName><forename type="first">Lawrence</forename><forename type="middle">C</forename><surname>Paulson</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="s">Lecture Notes in Computer Science</title>
		<imprint>
			<biblScope unit="volume">828</biblScope>
			<date type="published" when="1994">1994</date>
			<publisher>Springer</publisher>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b102">
	<analytic>
		<title level="a" type="main">A SAT-based decision procedure for the subclass of unrollable list formulas in ACL2 (SULFA)</title>
		<author>
			<persName><forename type="first">Erik</forename><surname>Reeber</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Warren</forename><forename type="middle">A</forename><surname>Hunt</surname><genName>Jr</genName></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Automated Reasoning -Third International Joint Conference, IJCAR 2006</title>
		<title level="s">Lecture Notes in Artificial Intelligence</title>
		<editor>
			<persName><forename type="first">Ulrich</forename><surname>Furbach</surname></persName>
		</editor>
		<editor>
			<persName><forename type="first">Natarajan</forename><surname>Shankar</surname></persName>
		</editor>
		<meeting><address><addrLine>Seattle, WA, USA</addrLine></address></meeting>
		<imprint>
			<date type="published" when="2006-08">August 2006. 2006</date>
			<biblScope unit="volume">4130</biblScope>
			<biblScope unit="page" from="453" to="467" />
		</imprint>
	</monogr>
	<note>Proceedings</note>
</biblStruct>

<biblStruct xml:id="b103">
	<analytic>
		<title level="a" type="main">Using decision procedures with a higher-order logic</title>
		<author>
			<persName><forename type="first">Natarajan</forename><surname>Shankar</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Theorem Proving in Higher Order Logics, 14th International Conference, TPHOLs 2001</title>
		<title level="s">Lecture Notes in Computer Science</title>
		<editor>
			<persName><forename type="first">Richard</forename><forename type="middle">J</forename><surname>Boulton</surname></persName>
		</editor>
		<editor>
			<persName><forename type="first">Paul</forename><forename type="middle">B</forename><surname>Jackson</surname></persName>
		</editor>
		<meeting><address><addrLine>Edinburgh, Scotland, UK</addrLine></address></meeting>
		<imprint>
			<publisher>Springer</publisher>
			<date type="published" when="2001">September 3-6, 2001. 2001</date>
			<biblScope unit="volume">2152</biblScope>
			<biblScope unit="page" from="5" to="26" />
		</imprint>
	</monogr>
	<note>Proceedings</note>
</biblStruct>

<biblStruct xml:id="b104">
	<analytic>
		<title level="a" type="main">The TPTP problem library: CNF release v1.2.1</title>
		<author>
			<persName><forename type="first">Geoff</forename><surname>Sutcliffe</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Christian</forename><surname>Suttner</surname></persName>
		</author>
		<ptr target="http://www.cs.miami.edu/~tptp/" />
	</analytic>
	<monogr>
		<title level="j">Journal of Automated Reasoning</title>
		<imprint>
			<biblScope unit="volume">21</biblScope>
			<biblScope unit="issue">2</biblScope>
			<biblScope unit="page" from="177" to="203" />
			<date type="published" when="1998">1998</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b105">
	<analytic>
		<title level="a" type="main">On solving Presburger and linear arithmetic with SAT</title>
		<author>
			<persName><forename type="first">Ofer</forename><surname>Strichman</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Formal Methods in Computer-Aided Design: 4th International Conference, FMCAD 2002</title>
		<title level="s">Lecture Notes in Computer Science</title>
		<editor>
			<persName><forename type="first">M</forename><forename type="middle">D</forename><surname>Aagaard</surname></persName>
		</editor>
		<editor>
			<persName><forename type="first">J</forename><forename type="middle">W</forename><surname>O'Leary</surname></persName>
		</editor>
		<meeting><address><addrLine>Portland, OR, USA</addrLine></address></meeting>
		<imprint>
			<publisher>Springer</publisher>
			<date type="published" when="2002">November 6-8, 2002. 2002</date>
			<biblScope unit="volume">2517</biblScope>
			<biblScope unit="page" from="160" to="169" />
		</imprint>
	</monogr>
	<note>Proceedings</note>
</biblStruct>

<biblStruct xml:id="b106">
	<analytic>
		<title level="a" type="main">On the complexity of derivation in propositional calculus</title>
		<author>
			<persName><forename type="first">G</forename><forename type="middle">S</forename><surname>Tseitin</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Automation Of Reasoning: Classical Papers On Computational Logic</title>
				<editor>
			<persName><forename type="first">J</forename><surname>Siekmann</surname></persName>
		</editor>
		<editor>
			<persName><forename type="first">G</forename><surname>Wrightson</surname></persName>
		</editor>
		<imprint>
			<biblScope unit="volume">II</biblScope>
			<biblScope unit="page" from="466" to="483" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b107">
	<monogr>
		<title level="m">Also in Structures in Constructive Mathematics and Mathematical Logic Part II</title>
				<editor>
			<persName><forename type="first">A</forename><forename type="middle">O</forename><surname>Slisenko</surname></persName>
		</editor>
		<imprint>
			<publisher>Springer</publisher>
			<date type="published" when="1968">1983. 1968</date>
			<biblScope unit="page" from="115" to="125" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b108">
	<analytic>
		<title level="a" type="main">Integrating a SAT solver with an LCF-style theorem prover</title>
		<author>
			<persName><forename type="first">Tjark</forename><surname>Weber</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Proceedings of PDPAR&apos;05 -Third International Workshop on Pragmatical Aspects of Decision Procedures in Automated Reasoning</title>
				<editor>
			<persName><forename type="first">A</forename><surname>Armando</surname></persName>
		</editor>
		<editor>
			<persName><forename type="first">A</forename><surname>Cimatti</surname></persName>
		</editor>
		<meeting>PDPAR&apos;05 -Third International Workshop on Pragmatical Aspects of Decision Procedures in Automated Reasoning<address><addrLine>Edinburgh, UK</addrLine></address></meeting>
		<imprint>
			<date type="published" when="2005-07">July 2005</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b109">
	<analytic>
		<title level="a" type="main">Using a SAT solver as a fast decision procedure for propositional logic in an LCF-style theorem prover</title>
		<author>
			<persName><forename type="first">Tjark</forename><surname>Weber</surname></persName>
		</author>
		<idno>PRG-RR-05-02</idno>
	</analytic>
	<monogr>
		<title level="m">Theorem Proving in Higher Order Logics -18th International Conference, TPHOLs 2005</title>
				<editor>
			<persName><forename type="first">Joe</forename><surname>Hurd</surname></persName>
		</editor>
		<editor>
			<persName><forename type="first">Edward</forename><surname>Smith</surname></persName>
		</editor>
		<editor>
			<persName><forename type="first">Ashish</forename><surname>Darbari</surname></persName>
		</editor>
		<meeting><address><addrLine>Oxford, UK; Oxford, UK</addrLine></address></meeting>
		<imprint>
			<date type="published" when="2005-08">August 2005. August 2005</date>
			<biblScope unit="page" from="180" to="189" />
		</imprint>
		<respStmt>
			<orgName>Oxford University Computing Laboratory, Programming Research Group</orgName>
		</respStmt>
	</monogr>
	<note type="report_type">Research Report</note>
	<note>Emerging Trends Proceedings</note>
</biblStruct>

<biblStruct xml:id="b110">
	<analytic>
		<title level="a" type="main">Validating SAT solvers using an independent resolution-based checker: Practical implementations and other applications</title>
		<author>
			<persName><forename type="first">Lintao</forename><surname>Zhang</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Sharad</forename><surname>Malik</surname></persName>
		</author>
		<author>
			<persName><surname>Fiorentini</surname></persName>
		</author>
		<author>
			<persName><forename type="first">G</forename><surname>Fiorino</surname></persName>
		</author>
		<author>
			<persName><forename type="first">U</forename><surname>Moscato</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Design, Automation and Test in Europe</title>
				<meeting><address><addrLine>DATE</addrLine></address></meeting>
		<imprint>
			<publisher>IEEE Computer Society</publisher>
			<date type="published" when="2003">2003. 2003. 2006</date>
			<biblScope unit="volume">153</biblScope>
			<biblScope unit="page" from="23" to="33" />
		</imprint>
	</monogr>
	<note>Esbc: an application for computing stabilization bounds</note>
</biblStruct>

<biblStruct xml:id="b111">
	<analytic>
		<title level="a" type="main">A new O(n lg n)-space decision procedure for propositional intuitionistic logic</title>
		<author>
			<persName><forename type="first">A</forename><surname>Avellone</surname></persName>
		</author>
		<author>
			<persName><forename type="first">G</forename><surname>Fiorino</surname></persName>
		</author>
		<author>
			<persName><forename type="first">U</forename><surname>Moscato</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">LPAR 2002: Short Contributions, CSL 2003: Extended Posters, volume VIII of Kurt Gödel Society</title>
				<editor>
			<persName><forename type="first">Andrei</forename><surname>Voronkov</surname></persName>
		</editor>
		<editor>
			<persName><forename type="first">Matthias</forename><surname>Baaz</surname></persName>
		</editor>
		<editor>
			<persName><forename type="first">Johann</forename><surname>Makowsky</surname></persName>
		</editor>
		<imprint>
			<publisher>Collegium Logicum</publisher>
			<date type="published" when="2004">2004</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b112">
	<monogr>
		<title level="m" type="main">Intelligent Backtracking on Constraint Satisfaction Problems: Experimental and Theoretical Results</title>
		<author>
			<persName><forename type="first">A</forename><forename type="middle">B</forename><surname>Baker</surname></persName>
		</author>
		<imprint>
			<date type="published" when="1995">1995</date>
		</imprint>
		<respStmt>
			<orgName>University of Oregon</orgName>
		</respStmt>
	</monogr>
	<note type="report_type">PhD thesis</note>
</biblStruct>

<biblStruct xml:id="b113">
	<analytic>
		<title level="a" type="main">Interactive theorem proving and program development: Coq&apos;Art: the calculus of inductive constructions</title>
		<author>
			<persName><forename type="first">Yves</forename><surname>Bertot</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Pierre</forename><surname>Castéran</surname></persName>
		</author>
		<idno>SV:adr</idno>
	</analytic>
	<monogr>
		<title level="m">Texts in theoretical computer science</title>
				<meeting><address><addrLine>pub-</addrLine></address></meeting>
		<imprint>
			<publisher>Springer-Verlag</publisher>
			<date type="published" when="2004">2004</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b114">
	<monogr>
		<title level="m" type="main">Implementing Mathematics with the Nuprl Proof Development System</title>
		<author>
			<persName><forename type="first">R</forename><surname>Constable</surname></persName>
		</author>
		<imprint>
			<date type="published" when="1986">1986</date>
			<publisher>Prentice-Hall</publisher>
			<pubPlace>Englewood Cliffs, New Jersey</pubPlace>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b115">
	<monogr>
		<title level="m" type="main">Modal Logic</title>
		<author>
			<persName><forename type="first">A</forename><surname>Chagrov</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Zakharyaschev</surname></persName>
		</author>
		<imprint>
			<date type="published" when="1997">1997</date>
			<publisher>Oxford University Press</publisher>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b116">
	<analytic>
		<title level="a" type="main">Contraction-free sequent calculi for intuitionistic logic</title>
		<author>
			<persName><forename type="first">R</forename><surname>Dyckhoff</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">Journal of Symbolic Logic</title>
		<imprint>
			<biblScope unit="volume">57</biblScope>
			<biblScope unit="issue">3</biblScope>
			<biblScope unit="page" from="795" to="807" />
			<date type="published" when="1992">1992</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b117">
	<analytic>
		<title level="a" type="main">On intuitionistic proof transformations, their complexity, and application to constructive program synthesis</title>
		<author>
			<persName><forename type="first">Uwe</forename><surname>Egly</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Stephan</forename><surname>Schmitt</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">Fundam. Inform</title>
		<imprint>
			<biblScope unit="volume">39</biblScope>
			<biblScope unit="issue">1-2</biblScope>
			<biblScope unit="page" from="59" to="83" />
			<date type="published" when="1999">1999</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b118">
	<analytic>
		<title level="a" type="main">Extracting exact time bounds from logical proofs</title>
		<author>
			<persName><forename type="first">M</forename><surname>Ferrari</surname></persName>
		</author>
		<author>
			<persName><forename type="first">C</forename><surname>Fiorentini</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Ornaghi</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Logic Based Program Synthesis and Transformation, 11th International Workshop, LOPSTR 2001, Selected Papers</title>
		<title level="s">Lecture Notes in Computer Science</title>
		<editor>
			<persName><forename type="first">A</forename><surname>Pettorossi</surname></persName>
		</editor>
		<imprint>
			<publisher>Springer-Verlag</publisher>
			<date type="published" when="2002">2002</date>
			<biblScope unit="volume">2372</biblScope>
			<biblScope unit="page" from="245" to="265" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b119">
	<monogr>
		<title level="m" type="main">Intuitionistic Logic, Model Theory and Forcing</title>
		<author>
			<persName><forename type="first">M</forename><forename type="middle">C</forename><surname>Fitting</surname></persName>
		</author>
		<imprint>
			<date type="published" when="1969">1969</date>
			<pubPlace>North-Holland</pubPlace>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b120">
	<monogr>
		<title level="m" type="main">Improvements to propositional satisfiability search algorithms</title>
		<author>
			<persName><forename type="first">Jon</forename><forename type="middle">William</forename><surname>Freeman</surname></persName>
		</author>
		<imprint>
			<date type="published" when="1995">1995</date>
		</imprint>
		<respStmt>
			<orgName>University of Pennsylvania</orgName>
		</respStmt>
	</monogr>
	<note type="report_type">PhD thesis</note>
</biblStruct>

<biblStruct xml:id="b121">
	<analytic>
		<title level="a" type="main">An O(n log n)-space decision procedure for intuitionistic propositional logic</title>
		<author>
			<persName><forename type="first">J</forename><surname>Hudelmaier</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">Journal of Logic and Computation</title>
		<imprint>
			<biblScope unit="volume">3</biblScope>
			<biblScope unit="issue">1</biblScope>
			<biblScope unit="page" from="63" to="75" />
			<date type="published" when="1993">1993</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b122">
	<analytic>
		<title level="a" type="main">Timing analysis of combinational circuits in intuitionistic propositional logic</title>
		<author>
			<persName><forename type="first">M</forename><surname>Mendler</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">Formal Methods in System Design</title>
		<imprint>
			<biblScope unit="volume">17</biblScope>
			<biblScope unit="issue">1</biblScope>
			<biblScope unit="page" from="5" to="37" />
			<date type="published" when="2000">2000</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b123">
	<analytic>
		<title level="a" type="main">Intuitionistic Type Theory</title>
		<author>
			<persName><forename type="first">P</forename><surname>Martin-Löf</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Studies in Proof Theory</title>
				<meeting><address><addrLine>Napoli</addrLine></address></meeting>
		<imprint>
			<publisher>Bibliopolis</publisher>
			<date type="published" when="1984">1984</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b124">
	<analytic>
		<title level="a" type="main">Avoiding duplications in tableau systems for intuitionistic logic and Kuroda logic</title>
		<author>
			<persName><forename type="first">P</forename><surname>Miglioli</surname></persName>
		</author>
		<author>
			<persName><forename type="first">U</forename><surname>Moscato</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Ornaghi</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">Logic Journal of the IGPL</title>
		<imprint>
			<biblScope unit="volume">5</biblScope>
			<biblScope unit="issue">1</biblScope>
			<biblScope unit="page" from="145" to="167" />
			<date type="published" when="1997">1997</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b125">
	<monogr>
		<title level="m" type="main">The ILTP problem library for intuitionistic logic</title>
		<author>
			<persName><forename type="first">Thomas</forename><surname>Raths</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Jens</forename><surname>Otten</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Christoph</forename><surname>Kreitz</surname></persName>
		</author>
		<imprint>
			<date type="published" when="2006">2006</date>
		</imprint>
	</monogr>
	<note>release v1.1. To appear in Journal of Automated Reasoning</note>
</biblStruct>

<biblStruct xml:id="b126">
	<analytic>
		<title level="a" type="main">Learning to act using real-time dynamic programming</title>
		<author>
			<persName><forename type="first">A</forename><forename type="middle">G</forename><surname>Barto</surname></persName>
		</author>
		<author>
			<persName><forename type="first">S</forename><forename type="middle">J</forename><surname>Bradtke</surname></persName>
		</author>
		<author>
			<persName><forename type="first">S</forename><surname>Singh</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">Artificial Intelligence</title>
		<imprint>
			<biblScope unit="volume">72</biblScope>
			<biblScope unit="issue">1-2</biblScope>
			<biblScope unit="page" from="81" to="138" />
			<date type="published" when="1995">1995</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b127">
	<analytic>
		<title level="a" type="main">Decision-theoretic planning: Structural assumptions and computational leverage</title>
		<author>
			<persName><forename type="first">C</forename><surname>Boutilier</surname></persName>
		</author>
		<author>
			<persName><forename type="first">T</forename><surname>Dean</surname></persName>
		</author>
		<author>
			<persName><forename type="first">S</forename><surname>Hanks</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">Journal of Artificial Intelligence Research</title>
		<imprint>
			<biblScope unit="volume">11</biblScope>
			<biblScope unit="page" from="1" to="94" />
			<date type="published" when="1999">1999</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b128">
	<monogr>
		<title level="m" type="main">Dynamic programming</title>
		<author>
			<persName><forename type="first">R</forename><forename type="middle">E</forename><surname>Bellman</surname></persName>
		</author>
		<imprint>
			<date type="published" when="1957">1957</date>
			<publisher>Princeton University Press</publisher>
			<pubPlace>Princeton, NJ, USA</pubPlace>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b129">
	<analytic>
		<title level="a" type="main">Symbolic Dynamic Programming for First-Order MDPs</title>
		<author>
			<persName><forename type="first">C</forename><surname>Boutilier</surname></persName>
		</author>
		<author>
			<persName><forename type="first">R</forename><surname>Reiter</surname></persName>
		</author>
		<author>
			<persName><forename type="first">B</forename><surname>Price</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Proceedings of the Seventeenth International Conference on Artificial Intelligence (IJCAI&apos;2001)</title>
				<editor>
			<persName><forename type="first">Bernhard</forename><surname>Nebel</surname></persName>
		</editor>
		<meeting>the Seventeenth International Conference on Artificial Intelligence (IJCAI&apos;2001)</meeting>
		<imprint>
			<publisher>Morgan Kaufmann</publisher>
			<date type="published" when="2001">2001</date>
			<biblScope unit="page" from="690" to="700" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b130">
	<analytic>
		<title level="a" type="main">Planning under time constraints in stochastic domains</title>
		<author>
			<persName><forename type="first">T</forename><surname>Dean</surname></persName>
		</author>
		<author>
			<persName><forename type="first">L</forename><surname>Kaelbling</surname></persName>
		</author>
		<author>
			<persName><forename type="first">J</forename><surname>Kirman</surname></persName>
		</author>
		<author>
			<persName><forename type="first">A</forename><surname>Nicholson</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">Artificial Intelligence</title>
		<imprint>
			<biblScope unit="volume">76</biblScope>
			<biblScope unit="page" from="35" to="74" />
			<date type="published" when="1995">1995</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b131">
	<analytic>
		<title level="a" type="main">Symbolic heuristic search for factored Markov Decision Processes</title>
		<author>
			<persName><forename type="first">Z</forename><surname>Feng</surname></persName>
		</author>
		<author>
			<persName><forename type="first">E</forename><surname>Hansen</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Proceedings of the Eighteenth National Conference on Artificial Intelligence (AAAI&apos;2002)</title>
				<editor>
			<persName><forename type="first">R</forename><surname>Dechter</surname></persName>
		</editor>
		<editor>
			<persName><forename type="first">M</forename><surname>Kearns</surname></persName>
		</editor>
		<editor>
			<persName><forename type="first">R</forename><surname>Sutton</surname></persName>
		</editor>
		<meeting>the Eighteenth National Conference on Artificial Intelligence (AAAI&apos;2002)<address><addrLine>Edmonton, Canada</addrLine></address></meeting>
		<imprint>
			<publisher>AAAI Press</publisher>
			<date type="published" when="2002">2002</date>
			<biblScope unit="page" from="455" to="460" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b132">
	<analytic>
		<title level="a" type="main">Approximate policy iteration with a policy language bias</title>
		<author>
			<persName><forename type="first">A</forename><surname>Fern</surname></persName>
		</author>
		<author>
			<persName><forename type="first">S</forename><surname>Yoon</surname></persName>
		</author>
		<author>
			<persName><forename type="first">R</forename><surname>Givan</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Proceedings of the Seventeenth Annual Conference on Neural Information Processing Systems (NIPS&apos;2003)</title>
				<editor>
			<persName><forename type="first">S</forename><surname>Thrun</surname></persName>
		</editor>
		<editor>
			<persName><forename type="first">L</forename><surname>Saul</surname></persName>
		</editor>
		<editor>
			<persName><forename type="first">B</forename><surname>Schölkopf</surname></persName>
		</editor>
		<meeting>the Seventeenth Annual Conference on Neural Information Processing Systems (NIPS&apos;2003)<address><addrLine>Vancouver, Canada</addrLine></address></meeting>
		<imprint>
			<publisher>MIT Press</publisher>
			<date type="published" when="2003">2003</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b133">
	<analytic>
		<title level="a" type="main">Exploiting first-order regression in inductive policy selection</title>
		<author>
			<persName><forename type="first">C</forename><surname>Gretton</surname></persName>
		</author>
		<author>
			<persName><forename type="first">S</forename><surname>Thiebaux</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Proceedings of the Twentieth Conference on Uncertainty in Artificial Intelligence (UAI&apos;2004)</title>
				<editor>
			<persName><forename type="first">M</forename><surname>Chickering</surname></persName>
		</editor>
		<editor>
			<persName><forename type="first">J</forename><surname>Halpern</surname></persName>
		</editor>
		<meeting>the Twentieth Conference on Uncertainty in Artificial Intelligence (UAI&apos;2004)<address><addrLine>Banff, Canada</addrLine></address></meeting>
		<imprint>
			<publisher>Morgan Kaufmann</publisher>
			<date type="published" when="2004-07">July 2004</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b134">
	<analytic>
		<title level="a" type="main">A new deductive approach to planning</title>
		<author>
			<persName><forename type="first">S</forename><surname>Hölldobler</surname></persName>
		</author>
		<author>
			<persName><forename type="first">J</forename><surname>Schneeberger</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">New Generation Computing</title>
		<imprint>
			<biblScope unit="volume">8</biblScope>
			<biblScope unit="page" from="225" to="244" />
			<date type="published" when="1990">1990</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b135">
	<analytic>
		<title level="a" type="main">A Logic-Based Approach to Dynamic Programming</title>
		<author>
			<persName><forename type="first">S</forename><surname>Hölldobler</surname></persName>
		</author>
		<author>
			<persName><forename type="first">O</forename><surname>Skvortsova</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Proceedings of the Workshop on &quot;Learning and Planning in Markov Processes-Advances and Challenges&quot;</title>
				<meeting>the Workshop on &quot;Learning and Planning in Markov Processes-Advances and Challenges&quot;<address><addrLine>San Jose, CA</addrLine></address></meeting>
		<imprint>
			<publisher>AAAI Press</publisher>
			<date type="published" when="2004-07">July 2004</date>
			<biblScope unit="page" from="31" to="36" />
		</imprint>
	</monogr>
	<note>the Nineteenth National Conference on Artificial Intelligence (AAAI&apos;04)</note>
</biblStruct>

<biblStruct xml:id="b136">
	<analytic>
		<title level="a" type="main">LAO*: A heuristic search algorithm that finds solutions with loops</title>
		<author>
			<persName><forename type="first">E</forename><surname>Hansen</surname></persName>
		</author>
		<author>
			<persName><forename type="first">S</forename><surname>Zilberstein</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">Artificial Intelligence</title>
		<imprint>
			<biblScope unit="volume">129</biblScope>
			<biblScope unit="page" from="35" to="62" />
			<date type="published" when="2001">2001</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b137">
	<analytic>
		<title level="a" type="main">An efficient subsumption algorithm for inductive logic programming</title>
		<author>
			<persName><forename type="first">J.-U</forename><surname>Kietz</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Lübbe</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Proceedings of the Eleventh International Conference on Machine Learning</title>
				<meeting>the Eleventh International Conference on Machine Learning</meeting>
		<imprint>
			<date type="published" when="1994">1994</date>
			<biblScope unit="page" from="130" to="138" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b138">
	<analytic>
		<title level="a" type="main">NP-completeness of the set unification and matching problems</title>
		<author>
			<persName><forename type="first">D</forename><surname>Kapur</surname></persName>
		</author>
		<author>
			<persName><forename type="first">P</forename><surname>Narendran</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Proceedings of the Conference on Automated Deduction</title>
		<title level="s">Lecture Notes in Computer Science</title>
		<editor>
			<persName><forename type="first">J</forename><forename type="middle">H</forename><surname>Siekman</surname></persName>
		</editor>
		<meeting>the Conference on Automated Deduction<address><addrLine>Berlin</addrLine></address></meeting>
		<imprint>
			<publisher>Springer</publisher>
			<date type="published" when="1986">1986</date>
			<biblScope unit="volume">230</biblScope>
			<biblScope unit="page" from="489" to="495" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b139">
	<analytic>
		<title level="a" type="main">Efficient symbolic reasoning for first-order MDPs</title>
		<author>
			<persName><forename type="first">E</forename><surname>Karabaev</surname></persName>
		</author>
		<author>
			<persName><forename type="first">G</forename><surname>Rammé</surname></persName>
		</author>
		<author>
			<persName><forename type="first">O</forename><surname>Skvortsova</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Proceedings of the Workshop on &quot;Planning, Learning and Monitoring with Uncertainty and Dynamic Worlds&quot; at the Seventeenth European Conference on Artificial Intelligence (ECAI&apos;2006)</title>
				<meeting>the Workshop on &quot;Planning, Learning and Monitoring with Uncertainty and Dynamic Worlds&quot; at the Seventeenth European Conference on Artificial Intelligence (ECAI&apos;2006)<address><addrLine>Riva del Garda, Italy</addrLine></address></meeting>
		<imprint>
			<date type="published" when="2006">2006</date>
		</imprint>
	</monogr>
	<note>To appear</note>
</biblStruct>

<biblStruct xml:id="b140">
	<analytic>
		<title level="a" type="main">Bellman goes relational</title>
		<author>
			<persName><forename type="first">K</forename><surname>Kersting</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><surname>Van Otterlo</surname></persName>
		</author>
		<author>
			<persName><forename type="first">L</forename><surname>De Raedt</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Proceedings of the Twenty-First International Conference in Machine Learning (ICML&apos;2004)</title>
				<editor>
			<persName><forename type="first">C</forename><forename type="middle">E</forename><surname>Brodley</surname></persName>
		</editor>
		<meeting>the Twenty-First International Conference in Machine Learning (ICML&apos;2004)<address><addrLine>Banff, Canada</addrLine></address></meeting>
		<imprint>
			<publisher>ACM</publisher>
			<date type="published" when="2004-07">July 2004</date>
			<biblScope unit="page" from="465" to="472" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b141">
	<monogr>
		<title level="m" type="main">Markov Decision Processes -Discrete Stochastic Dynamic Programming</title>
		<author>
			<persName><forename type="first">M</forename><forename type="middle">L</forename><surname>Puterman</surname></persName>
		</author>
		<imprint>
			<date type="published" when="1994">1994</date>
			<publisher>John Wiley &amp; Sons, Inc</publisher>
			<pubPlace>New York, NY</pubPlace>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b142">
	<analytic>
		<title level="a" type="main">The Frame Problem in the Situation Calculus: A Simple Solution (Sometimes) and a Completeness Result for Goal Regression</title>
		<author>
			<persName><forename type="first">R</forename><surname>Reiter</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Artificial Intelligence and Mathematical Theory of Computation: Papers in Honor of John McCarthy</title>
				<editor>
			<persName><forename type="first">Vladimir</forename><surname>Lifschitz</surname></persName>
		</editor>
		<meeting><address><addrLine>San Diego, CA</addrLine></address></meeting>
		<imprint>
			<publisher>Academic Press</publisher>
			<date type="published" when="1991">1991</date>
			<biblScope unit="page" from="359" to="380" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b143">
	<analytic>
		<title level="a" type="main">A machine-oriented logic based on the resolution principle</title>
		<author>
			<persName><forename type="first">J</forename><forename type="middle">A</forename><surname>Robinson</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">Journal of the Association for Computing Machinery</title>
		<imprint>
			<biblScope unit="volume">12</biblScope>
			<biblScope unit="issue">1</biblScope>
			<biblScope unit="page" from="23" to="41" />
			<date type="published" when="1965">1965</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b144">
	<analytic>
		<title level="a" type="main">APRICODD: Approximate policy construction using decision diagrams</title>
		<author>
			<persName><forename type="first">R</forename><surname>St-Aubin</surname></persName>
		</author>
		<author>
			<persName><forename type="first">J</forename><surname>Hoey</surname></persName>
		</author>
		<author>
			<persName><forename type="first">C</forename><surname>Boutilier</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Proceedings of the Fourteenth Annual Conference on Neural Information Processing Systems (NIPS&apos;2000)</title>
				<editor>
			<persName><forename type="first">T</forename><forename type="middle">K</forename><surname>Leen</surname></persName>
		</editor>
		<editor>
			<persName><forename type="first">T</forename><forename type="middle">G</forename><surname>Dietterich</surname></persName>
		</editor>
		<editor>
			<persName><forename type="first">V</forename><surname>Tresp</surname></persName>
		</editor>
		<meeting>the Fourteenth Annual Conference on Neural Information Processing Systems (NIPS&apos;2000)<address><addrLine>Denver</addrLine></address></meeting>
		<imprint>
			<publisher>MIT Press</publisher>
			<date type="published" when="2000">2000</date>
			<biblScope unit="page" from="1089" to="1095" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b145">
	<analytic>
		<title level="a" type="main">Efficient θ-subsumption based on graph algorithms</title>
		<author>
			<persName><forename type="first">T</forename><surname>Scheffer</surname></persName>
		</author>
		<author>
			<persName><forename type="first">R</forename><surname>Herbrich</surname></persName>
		</author>
		<author>
			<persName><forename type="first">F</forename><surname>Wysotzki</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Proceedings of the 6th International Workshop on Inductive Logic Programming</title>
				<meeting>the 6th International Workshop on Inductive Logic Programming<address><addrLine>Berlin</addrLine></address></meeting>
		<imprint>
			<date type="published" when="1996-08">August 1996</date>
			<biblScope unit="volume">1314</biblScope>
			<biblScope unit="page" from="212" to="228" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b146">
	<analytic>
		<title level="a" type="main">Introduction to the fluent calculus</title>
		<author>
			<persName><forename type="first">M</forename><surname>Thielscher</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">Electronic Transactions on Artificial Intelligence</title>
		<imprint>
			<biblScope unit="volume">2</biblScope>
			<biblScope unit="issue">3-4</biblScope>
			<biblScope unit="page" from="179" to="192" />
			<date type="published" when="1998">1998</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b147">
	<analytic>
		<title level="a" type="main">The first probabilistic track of the International Planning Competition</title>
		<author>
			<persName><forename type="first">H</forename><forename type="middle">L S</forename><surname>Younes</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><forename type="middle">L</forename><surname>Littman</surname></persName>
		</author>
		<author>
			<persName><forename type="first">D</forename><surname>Weissman</surname></persName>
		</author>
		<author>
			<persName><forename type="first">J</forename><surname>Asmuth</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">Journal of Artificial Intelligence Research</title>
		<imprint>
			<biblScope unit="volume">24</biblScope>
			<biblScope unit="page" from="851" to="887" />
			<date type="published" when="2005">2005</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b148">
	<analytic>
		<title level="a" type="main">Shatter: Efficient symmetry-breaking for boolean satisfiability</title>
		<author>
			<persName><forename type="first">Fadi</forename><forename type="middle">A</forename><surname>Aloul</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Igor</forename><forename type="middle">L</forename><surname>Markov</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Karem</forename><forename type="middle">A</forename><surname>Sakallah</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Design Automation Conference</title>
				<imprint>
			<publisher>ACM/IEEE</publisher>
			<date type="published" when="2003">2003</date>
			<biblScope unit="page" from="836" to="839" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b149">
	<analytic>
		<title level="a" type="main">Old resolution meets modern SLS</title>
		<author>
			<persName><forename type="first">Anbulagan</forename></persName>
		</author>
		<author>
			<persName><forename type="first">Duc</forename><forename type="middle">Nghia</forename><surname>Pham</surname></persName>
		</author>
		<author>
			<persName><forename type="first">John</forename><surname>Slaney</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Abdul</forename><surname>Sattar</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Proceedings of 20th AAAI</title>
				<meeting>20th AAAI</meeting>
		<imprint>
			<date type="published" when="2005">2005</date>
			<biblScope unit="page" from="354" to="359" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b150">
	<analytic>
		<title level="a" type="main">Boosting SLS performance by incorporating resolution-based preprocessor</title>
		<author>
			<persName><forename type="first">Anbulagan</forename></persName>
		</author>
		<author>
			<persName><forename type="first">Duc</forename><forename type="middle">Nghia</forename><surname>Pham</surname></persName>
		</author>
		<author>
			<persName><forename type="first">John</forename><surname>Slaney</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Abdul</forename><surname>Sattar</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Proceedings of Third International Workshop on Local Search Techniques in Constraint Satisfaction (LSCS), in conjunction with CP-06</title>
				<meeting>Third International Workshop on Local Search Techniques in Constraint Satisfaction (LSCS), in conjunction with CP-06</meeting>
		<imprint>
			<date type="published" when="2006">2006</date>
			<biblScope unit="page" from="43" to="57" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b151">
	<analytic>
		<title level="a" type="main">Lookahead saturation with restriction for SAT</title>
		<author>
			<persName><forename type="first">Anbulagan</forename></persName>
		</author>
		<author>
			<persName><forename type="first">John</forename><surname>Slaney</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Proceedings of 11th CP</title>
				<meeting>11th CP</meeting>
		<imprint>
			<date type="published" when="2005">2005</date>
			<biblScope unit="page" from="727" to="731" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b152">
	<analytic>
		<title level="a" type="main">Efficient symmetry breaking for boolean satisfiability</title>
		<author>
			<persName><forename type="first">Fadi</forename><forename type="middle">A</forename><surname>Aloul</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Karem</forename><forename type="middle">A</forename><surname>Sakallah</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Igor</forename><forename type="middle">L</forename><surname>Markov</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Proceedings of 18th IJCAI</title>
				<meeting>18th IJCAI<address><addrLine>Mexico</addrLine></address></meeting>
		<imprint>
			<date type="published" when="2003">2003</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b153">
	<analytic>
		<title level="a" type="main">Enhancing Davis Putnam with extended binary clause reasoning</title>
		<author>
			<persName><forename type="first">Fahiem</forename><surname>Bacchus</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Proceedings of 18th AAAI</title>
				<meeting>18th AAAI<address><addrLine>Edmonton, Canada</addrLine></address></meeting>
		<imprint>
			<publisher>AAAI Press</publisher>
			<date type="published" when="2002-08">August 2002</date>
			<biblScope unit="page" from="613" to="619" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b154">
	<analytic>
		<title level="a" type="main">A simplifier for propositional formulas with many binary clauses</title>
		<author>
			<persName><forename type="first">Ronen</forename><forename type="middle">I</forename><surname>Brafman</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Proceedings of 17th IJCAI</title>
				<meeting>17th IJCAI</meeting>
		<imprint>
			<date type="published" when="2001">2001</date>
			<biblScope unit="page" from="515" to="522" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b155">
	<analytic>
		<title level="a" type="main">A simplifier for propositional formulas with many binary clauses</title>
		<author>
			<persName><forename type="first">Ronen</forename><forename type="middle">I</forename><surname>Brafman</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">IEEE Transactions on Systems, Man, and Cybernetics, Part B</title>
		<imprint>
			<biblScope unit="volume">34</biblScope>
			<biblScope unit="issue">1</biblScope>
			<biblScope unit="page" from="52" to="59" />
			<date type="published" when="2004">2004</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b156">
	<analytic>
		<title level="a" type="main">Effective preprocessing with hyperresolution and equality reduction</title>
		<author>
			<persName><forename type="first">Fahiem</forename><surname>Bacchus</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Jonathan</forename><surname>Winter</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Revised Selected Papers of SAT 2003</title>
				<imprint>
			<publisher>Springer</publisher>
			<date type="published" when="2004">2004</date>
			<biblScope unit="volume">2919</biblScope>
			<biblScope unit="page" from="341" to="355" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b157">
	<analytic>
		<title level="a" type="main">A backbone-search heuristic for efficient solving of hard 3-SAT formulae</title>
		<author>
			<persName><forename type="first">Olivier</forename><surname>Dubois</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Gilles</forename><surname>Dequen</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Proceedings of 17th IJCAI</title>
				<meeting>17th IJCAI<address><addrLine>Seattle, Washington, USA</addrLine></address></meeting>
		<imprint>
			<date type="published" when="2001">2001</date>
			<biblScope unit="page" from="248" to="253" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b158">
	<analytic>
		<title level="a" type="main">A computing procedure for quantification theory</title>
		<author>
			<persName><forename type="first">M</forename><surname>Davis</surname></persName>
		</author>
		<author>
			<persName><forename type="first">H</forename><surname>Putnam</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">Journal of the ACM</title>
		<imprint>
			<biblScope unit="volume">7</biblScope>
			<biblScope unit="page" from="201" to="215" />
			<date type="published" when="1960">1960</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b159">
	<analytic>
		<title level="a" type="main">Effective preprocessing in SAT through variable and clause elimination</title>
		<author>
			<persName><forename type="first">Niklas</forename><surname>Eén</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Armin</forename><surname>Biere</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Proceedings of 8th SAT</title>
				<meeting>8th SAT</meeting>
		<imprint>
			<publisher>LNCS Springer</publisher>
			<date type="published" when="2005">2005</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b160">
	<analytic>
		<title level="a" type="main">An extensible SAT-solver</title>
		<author>
			<persName><forename type="first">Niklas</forename><surname>Eén</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Niklas</forename><surname>Sorensson</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Proceedings of 6th SAT</title>
				<meeting>6th SAT</meeting>
		<imprint>
			<date type="published" when="2003">2003</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b161">
	<analytic>
		<title level="a" type="main">Aligning CNF- and equivalence reasoning</title>
		<author>
			<persName><forename type="first">Marijn</forename><surname>Heule</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Hans</forename><surname>Van Maaren</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Proceedings of 7th SAT</title>
				<meeting>7th SAT<address><addrLine>Vancouver, Canada</addrLine></address></meeting>
		<imprint>
			<date type="published" when="2004">2004</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b162">
	<analytic>
		<title level="a" type="main">March dl: Adding adaptive heuristics and a new branching strategy</title>
		<author>
			<persName><forename type="first">Marijn</forename><surname>Heule</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Hans</forename><surname>Van Maaren</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">Journal on Satisfiability, Boolean Modeling and Computation</title>
		<imprint>
			<biblScope unit="issue">2</biblScope>
			<biblScope unit="page" from="47" to="59" />
			<date type="published" when="2006">2006</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b163">
	<analytic>
		<title level="a" type="main">Look-ahead versus look-back for satisfiability problems</title>
		<author>
			<persName><forename type="first">Chu</forename><forename type="middle">Min</forename><surname>Li</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Anbulagan</forename></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Proceedings of 3rd CP</title>
				<meeting>3rd CP<address><addrLine>Schloss Hagenberg, Austria</addrLine></address></meeting>
		<imprint>
			<date type="published" when="1997">1997</date>
			<biblScope unit="page" from="341" to="355" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b164">
	<analytic>
		<title level="a" type="main">Integrating equivalency reasoning into Davis-Putnam procedure</title>
		<author>
			<persName><forename type="first">Chu</forename><surname>Min</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Li</forename></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Proceedings of 17th AAAI</title>
				<meeting>17th AAAI<address><addrLine>USA</addrLine></address></meeting>
		<imprint>
			<publisher>AAAI Press</publisher>
			<date type="published" when="2000">2000</date>
			<biblScope unit="page" from="291" to="296" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b165">
	<analytic>
		<title level="a" type="main">The interaction between simplification and search in propositional satisfiability</title>
		<author>
			<persName><forename type="first">I</forename><surname>Lynce</surname></persName>
		</author>
		<author>
			<persName><forename type="first">J</forename><surname>Marques-Silva</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Proceedings of CP&apos;01 Workshop on Modeling and Problem Formulation</title>
				<meeting>CP&apos;01 Workshop on Modeling and Problem Formulation</meeting>
		<imprint>
			<date type="published" when="2001">2001</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b166">
	<analytic>
		<title level="a" type="main">Chaff: Engineering an efficient SAT solver</title>
		<author>
			<persName><forename type="first">M</forename><forename type="middle">W</forename><surname>Moskewicz</surname></persName>
		</author>
		<author>
			<persName><forename type="first">C</forename><forename type="middle">F</forename><surname>Madigan</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Y</forename><surname>Zhao</surname></persName>
		</author>
		<author>
			<persName><forename type="first">L</forename><surname>Zhang</surname></persName>
		</author>
		<author>
			<persName><forename type="first">S</forename><surname>Malik</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Proceedings of DAC</title>
				<meeting>DAC</meeting>
		<imprint>
			<date type="published" when="2001">2001</date>
			<biblScope unit="page" from="530" to="535" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b167">
	<analytic>
		<title level="a" type="main">Recovering and exploiting structural knowledge from CNF formulas</title>
		<author>
			<persName><forename type="first">Richard</forename><surname>Ostrowski</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Éric</forename><surname>Grégoire</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Bertrand</forename><surname>Mazure</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Lakhdar</forename><surname>Sais</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Proceedings of 8th CP</title>
				<meeting>8th CP</meeting>
		<imprint>
			<date type="published" when="2002">2002</date>
			<biblScope unit="page" from="185" to="199" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b168">
	<analytic>
		<title level="a" type="main">A way to simplify truth functions</title>
		<author>
			<persName><forename type="first">W</forename><forename type="middle">V</forename><surname>Quine</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">American Mathematical Monthly</title>
		<imprint>
			<biblScope unit="volume">62</biblScope>
			<biblScope unit="page" from="627" to="631" />
			<date type="published" when="1955">1955</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b169">
	<analytic>
		<title level="a" type="main">A machine-oriented logic based on the resolution principle</title>
		<author>
			<persName><forename type="first">J</forename><forename type="middle">A</forename><surname>Robinson</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">Journal of the ACM</title>
		<imprint>
			<biblScope unit="volume">12</biblScope>
			<biblScope unit="page" from="23" to="41" />
			<date type="published" when="1965">1965</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b170">
	<monogr>
		<title level="m" type="main">Efficient Algorithms for Clause Learning SAT Solvers</title>
		<author>
			<persName><forename type="first">Lawrence</forename><forename type="middle">O</forename><surname>Ryan</surname></persName>
		</author>
		<imprint>
			<date type="published" when="2004">2004</date>
			<pubPlace>Burnaby, Canada</pubPlace>
		</imprint>
		<respStmt>
			<orgName>Simon Fraser University</orgName>
		</respStmt>
	</monogr>
	<note type="report_type">PhD thesis</note>
</biblStruct>

<biblStruct xml:id="b171">
	<analytic>
		<title level="a" type="main">MINISAT v1.13 -A SAT solver with conflict-clause minimization</title>
		<author>
			<persName><forename type="first">Niklas</forename><surname>Sorensson</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Niklas</forename><surname>Eén</surname></persName>
		</author>
		<ptr target="http://www.lri.fr/∼simon/contest05/results/descriptions/solvers/minisatstatic.pdf" />
	</analytic>
	<monogr>
		<title level="m">2005 International SAT Competition website</title>
				<imprint>
			<date type="published" when="2005">2005</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b172">
	<analytic>
		<title level="a" type="main">Ten challenges in propositional reasoning and search</title>
		<author>
			<persName><forename type="first">Bart</forename><surname>Selman</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Henry</forename><surname>Kautz</surname></persName>
		</author>
		<author>
			<persName><forename type="first">David</forename><surname>McAllester</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Proceedings of 15th IJCAI</title>
				<meeting>15th IJCAI<address><addrLine>Nagoya, Aichi, Japan</addrLine></address></meeting>
		<imprint>
			<date type="published" when="1997">1997</date>
			<biblScope unit="page" from="50" to="54" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b173">
	<analytic>
		<title level="a" type="main">NiVER: Non increasing variable elimination resolution for preprocessing SAT instances</title>
		<author>
			<persName><forename type="first">Sathiamoorthy</forename><surname>Subbarayan</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Dhiraj</forename><forename type="middle">K</forename><surname>Pradhan</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Revised Selected Papers of SAT 2004</title>
		<imprint>
			<publisher>Springer</publisher>
			<date type="published" when="2005">2005</date>
			<biblScope unit="volume">3542</biblScope>
			<biblScope unit="page" from="276" to="291" />
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b174">
	<analytic>
		<title level="a" type="main">A two-phase algorithm for solving a class of hard satisfiability problems</title>
		<author>
			<persName><forename type="first">Joost</forename><forename type="middle">P</forename><surname>Warners</surname></persName>
		</author>
		<author>
			<persName><forename type="first">Hans</forename><surname>van Maaren</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="j">Operations Research Letters</title>
		<imprint>
			<biblScope unit="volume">23</biblScope>
			<biblScope unit="page" from="81" to="88" />
			<date type="published" when="1998">1998</date>
		</imprint>
	</monogr>
</biblStruct>

<biblStruct xml:id="b175">
	<analytic>
		<title level="a" type="main">Efficient conflict driven learning in a Boolean satisfiability solver</title>
		<author>
			<persName><forename type="first">L</forename><surname>Zhang</surname></persName>
		</author>
		<author>
			<persName><forename type="first">C</forename><forename type="middle">F</forename><surname>Madigan</surname></persName>
		</author>
		<author>
			<persName><forename type="first">M</forename><forename type="middle">W</forename><surname>Moskewicz</surname></persName>
		</author>
		<author>
			<persName><forename type="first">S</forename><surname>Malik</surname></persName>
		</author>
	</analytic>
	<monogr>
		<title level="m">Proceedings of International Conference on Computer Aided Design ICCAD2001</title>
		<meeting>International Conference on Computer Aided Design ICCAD2001</meeting>
		<imprint>
			<date type="published" when="2001">2001</date>
		</imprint>
	</monogr>
</biblStruct>

				</listBibl>
			</div>
		</back>
	</text>
</TEI>
