% doc-src/Logics/LK.tex
% author wenzelm  Wed Jul 25 12:38:54 2012 +0200 (2012-07-25)
% changeset 48497 ba61aceaa18a  parent 43049 99985426c0bb  permissions -rw-r--r--
% some updates on "Building a repository version of Isabelle";
     1 \chapter{First-Order Sequent Calculus}

     2 \index{sequent calculus|(}

     3

     4 The theory~\thydx{LK} implements classical first-order logic through Gentzen's

     5 sequent calculus (see Gallier~\cite{gallier86} or Takeuti~\cite{takeuti87}).

     6 Resembling the method of semantic tableaux, the calculus is well suited for

backwards proof.  Assertions have the form $\Gamma\turn \Delta$, where

$\Gamma$ and $\Delta$ are lists of formulae.  Associative unification,

     9 simulated by higher-order unification, handles lists

    10 (\S\ref{sec:assoc-unification} presents details, if you are interested).

    11

    12 The logic is many-sorted, using Isabelle's type classes.  The class of

    13 first-order terms is called \cldx{term}.  No types of individuals are

    14 provided, but extensions can define types such as {\tt nat::term} and type

    15 constructors such as {\tt list::(term)term}.  Below, the type variable

    16 $\alpha$ ranges over class {\tt term}; the equality symbol and quantifiers

    17 are polymorphic (many-sorted).  The type of formulae is~\tydx{o}, which

    18 belongs to class {\tt logic}.

    19

    20 LK implements a classical logic theorem prover that is nearly as powerful as

    21 the generic classical reasoner.  The simplifier is now available too.

    22

    23 To work in LK, start up Isabelle specifying  \texttt{Sequents} as the

    24 object-logic.  Once in Isabelle, change the context to theory \texttt{LK.thy}:

    25 \begin{ttbox}

    26 isabelle Sequents

    27 context LK.thy;

    28 \end{ttbox}

    29 Modal logic and linear logic are also available, but unfortunately they are

    30 not documented.

    31

    32

    33 \begin{figure}

    34 \begin{center}

    35 \begin{tabular}{rrr}

    36   \it name      &\it meta-type          & \it description       \\

    37   \cdx{Trueprop}& $[sobj\To sobj, sobj\To sobj]\To prop$ & coercion to $prop$\\

    38   \cdx{Seqof}   & $[o,sobj]\To sobj$    & singleton sequence    \\

    39   \cdx{Not}     & $o\To o$              & negation ($\neg$)     \\

    40   \cdx{True}    & $o$                   & tautology ($\top$)    \\

    41   \cdx{False}   & $o$                   & absurdity ($\bot$)

    42 \end{tabular}

    43 \end{center}

    44 \subcaption{Constants}

    45

    46 \begin{center}

    47 \begin{tabular}{llrrr}

    48   \it symbol &\it name     &\it meta-type & \it priority & \it description \\

    49   \sdx{ALL}  & \cdx{All}  & $(\alpha\To o)\To o$ & 10 &

    50         universal quantifier ($\forall$) \\

    51   \sdx{EX}   & \cdx{Ex}   & $(\alpha\To o)\To o$ & 10 &

    52         existential quantifier ($\exists$) \\

    53   \sdx{THE} & \cdx{The}  & $(\alpha\To o)\To \alpha$ & 10 &

    54         definite description ($\iota$)

    55 \end{tabular}

    56 \end{center}

    57 \subcaption{Binders}

    58

    59 \begin{center}

    60 \index{*"= symbol}

    61 \index{&@{\tt\&} symbol}

    62 \index{"!@{\tt\char124} symbol} %\char124 is vertical bar. We use ! because | stopped working

    63 \index{*"-"-"> symbol}

    64 \index{*"<"-"> symbol}

    65 \begin{tabular}{rrrr}

    66     \it symbol  & \it meta-type         & \it priority & \it description \\

    67     \tt = &     $[\alpha,\alpha]\To o$  & Left 50 & equality ($=$) \\

    68     \tt \& &    $[o,o]\To o$ & Right 35 & conjunction ($\conj$) \\

    69     \tt | &     $[o,o]\To o$ & Right 30 & disjunction ($\disj$) \\

    70     \tt --> &   $[o,o]\To o$ & Right 25 & implication ($\imp$) \\

    71     \tt <-> &   $[o,o]\To o$ & Right 25 & biconditional ($\bimp$)

    72 \end{tabular}

    73 \end{center}

    74 \subcaption{Infixes}

    75

    76 \begin{center}

    77 \begin{tabular}{rrr}

    78   \it external          & \it internal  & \it description \\

    79   \tt $\Gamma$ |- $\Delta$  &  \tt Trueprop($\Gamma$, $\Delta$) &

    80         sequent $\Gamma\turn \Delta$

    81 \end{tabular}

    82 \end{center}

    83 \subcaption{Translations}

    84 \caption{Syntax of {\tt LK}} \label{lk-syntax}

    85 \end{figure}

    86

    87

    88 \begin{figure}

    89 \dquotes

\[ \begin{array}{rcl}
    prop & = & sequence " |- " sequence
\\[2ex]
sequence & = & elem \quad (", " elem)^* \\
         & | & empty
\\[2ex]
    elem & = & "\$ " term \\
         & | & formula \\
         & | & "<<" sequence ">>"
\\[2ex]
 formula & = & \hbox{expression of type~$o$} \\
         & | & term " = " term \\
         & | & "\ttilde\ " formula \\
         & | & formula " \& " formula \\
         & | & formula " | " formula \\
         & | & formula " --> " formula \\
         & | & formula " <-> " formula \\
         & | & "ALL~" id~id^* " . " formula \\
         & | & "EX~~" id~id^* " . " formula \\
         & | & "THE~" id~ " . " formula
\end{array}
\]

   112 \caption{Grammar of {\tt LK}} \label{lk-grammar}

   113 \end{figure}

   114

   115

   116

   117

   118 \begin{figure}

   119 \begin{ttbox}

\tdx{basic}       $H, P, $G |- $E, P, $F

   121

\tdx{contRS}      $H |- $E, $S, $S, $F ==> $H |- $E, $S, $F
\tdx{contLS}      $H, $S, $S, $G |- $E ==> $H, $S, $G |- $E

   124

\tdx{thinRS}      $H |- $E, $F ==> $H |- $E, $S, $F
\tdx{thinLS}      $H, $G |- $E ==> $H, $S, $G |- $E

   127

\tdx{cut}         [| $H |- $E, P;  $H, P |- $E |] ==> $H |- $E

   129 \subcaption{Structural rules}

   130

\tdx{refl}        $H |- $E, a=a, $F
\tdx{subst}       $H(a), $G(a) |- $E(a) ==> $H(b), a=b, $G(b) |- $E(b)
\subcaption{Equality rules}
\end{ttbox}

\caption{Basic Rules of {\tt LK}}  \label{lk-basic-rules}
\end{figure}

\begin{figure}
\begin{ttbox}
\tdx{True_def}    True == False-->False
\tdx{iff_def}     P<->Q == (P-->Q) & (Q-->P)

\tdx{conjR}   [| $H|- $E, P, $F;  $H|- $E, Q, $F |] ==> $H|- $E, P&Q, $F

\tdx{conjL}   $H, P, Q, $G |- $E ==> $H, P & Q, $G |- $E

   146

\tdx{disjR}   $H |- $E, P, Q, $F ==> $H |- $E, P|Q, $F

\tdx{disjL}   [| $H, P, $G |- $E;  $H, Q, $G |- $E |] ==> $H, P|Q, $G |- $E

\tdx{impR}    $H, P |- $E, Q, $F ==> $H |- $E, P-->Q, $F
\tdx{impL}    [| $H, $G |- $E, P;  $H, Q, $G |- $E |] ==> $H, P-->Q, $G |- $E

   152

\tdx{notR}    $H, P |- $E, $F ==> $H |- $E, ~P, $F

\tdx{notL}    $H, $G |- $E, P ==> $H, ~P, $G |- $E

   155

\tdx{FalseL}  $H, False, $G |- $E

\tdx{allR}    (!!x. $H|- $E, P(x), $F) ==> $H|- $E, ALL x. P(x), $F
\tdx{allL}    $H, P(x), $G, ALL x. P(x) |- $E ==> $H, ALL x. P(x), $G|- $E

\tdx{exR}     $H|- $E, P(x), $F, EX x. P(x) ==> $H|- $E, EX x. P(x), $F
\tdx{exL}     (!!x. $H, P(x), $G|- $E) ==> $H, EX x. P(x), $G|- $E

\tdx{The}     [| $H |- $E, P(a), $F;  !!x. $H, P(x) |- $E, x=a, $F |] ==>
              $H |- $E, P(THE x. P(x)), $F

   166 \subcaption{Logical rules}

   167 \end{ttbox}

   168

   169 \caption{Rules of {\tt LK}}  \label{lk-rules}

   170 \end{figure}

   171

   172

   173 \section{Syntax and rules of inference}

   174 \index{*sobj type}

   175

   176 Figure~\ref{lk-syntax} gives the syntax for {\tt LK}, which is complicated

   177 by the representation of sequents.  Type $sobj\To sobj$ represents a list

   178 of formulae.

   179

   180 The \textbf{definite description} operator~$\iota x. P[x]$ stands for some~$a$

   181 satisfying~$P[a]$, if one exists and is unique.  Since all terms in LK denote

   182 something, a description is always meaningful, but we do not know its value

   183 unless $P[x]$ defines it uniquely.  The Isabelle notation is \hbox{\tt THE

   184   $x$.\ $P[x]$}.  The corresponding rule (Fig.\ts\ref{lk-rules}) does not

   185 entail the Axiom of Choice because it requires uniqueness.

   186

   187 Conditional expressions are available with the notation

\[ \dquotes
"if"~formula~"then"~term~"else"~term. \]

   190

   191 Figure~\ref{lk-grammar} presents the grammar of LK.  Traditionally,

$\Gamma$ and $\Delta$ are meta-variables for sequences.  In Isabelle's

notation, the prefix~\verb|$| on a term makes it range over sequences.
In a sequent, anything not prefixed by \verb|$| is taken as a formula.

   195

   196 The notation \texttt{<<$sequence$>>} stands for a sequence of formul\ae{}.

   197 For example, you can declare the constant \texttt{imps} to consist of two

   198 implications:

   199 \begin{ttbox}

   200 consts     P,Q,R :: o

   201 constdefs imps :: seq'=>seq'

   202          "imps == <<P --> Q, Q --> R>>"

   203 \end{ttbox}

   204 Then you can use it in axioms and goals, for example

   205 \begin{ttbox}

Goalw [imps_def] "P, $imps |- R";
{\out Level 0}
{\out P, $imps |- R}

   209 {\out  1. P, P --> Q, Q --> R |- R}

   210 by (Fast_tac 1);

   211 {\out Level 1}

{\out P, $imps |- R}
{\out No subgoals!}
\end{ttbox}

Figures~\ref{lk-basic-rules} and~\ref{lk-rules} present the rules of theory
\thydx{LK}.  The connective $\bimp$ is defined using $\conj$ and $\imp$.  The
axiom for basic sequents is expressed in a form that provides automatic
thinning: redundant formulae are simply ignored.  The other rules are
expressed in the form most suitable for backward proof; exchange and
contraction rules are not normally required, although they are provided
anyway.


\begin{figure}
\begin{ttbox}
\tdx{thinR}        $H |- $E, $F ==> $H |- $E, P, $F
\tdx{thinL}        $H, $G |- $E ==> $H, P, $G |- $E

\tdx{contR}        $H |- $E, P, P, $F ==> $H |- $E, P, $F
\tdx{contL}        $H, P, P, $G |- $E ==> $H, P, $G |- $E

\tdx{symR}         $H |- $E, $F, a=b ==> $H |- $E, b=a, $F
\tdx{symL}         $H, $G, b=a |- $E ==> $H, a=b, $G |- $E

\tdx{transR}       [| $H|- $E, $F, a=b;  $H|- $E, $F, b=c |]
                   ==> $H|- $E, a=c, $F

   238

\tdx{TrueR}        $H |- $E, True, $F

\tdx{iffR}         [| $H, P |- $E, Q, $F;  $H, Q |- $E, P, $F |]
                   ==> $H |- $E, P<->Q, $F

   243

\tdx{iffL}         [| $H, $G |- $E, P, Q;  $H, Q, P, $G |- $E |]

             ==> $H, P<->Q, $G |- $E

\tdx{allL_thin}    $H, P(x), $G |- $E ==> $H, ALL x. P(x), $G |- $E
\tdx{exR_thin}     $H |- $E, P(x), $F ==> $H |- $E, EX x. P(x), $F

\tdx{the_equality} [| $H |- $E, P(a), $F;

                !!x. $H, P(x) |- $E, x=a, $F |]
               ==> $H |- $E, (THE x. P(x)) = a, $F

   253 \end{ttbox}

   254

   255 \caption{Derived rules for {\tt LK}} \label{lk-derived}

   256 \end{figure}

   257

   258 Figure~\ref{lk-derived} presents derived rules, including rules for

   259 $\bimp$.  The weakened quantifier rules discard each quantification after a

   260 single use; in an automatic proof procedure, they guarantee termination,

   261 but are incomplete.  Multiple use of a quantifier can be obtained by a

   262 contraction rule, which in backward proof duplicates a formula.  The tactic

   263 {\tt res_inst_tac} can instantiate the variable~{\tt?P} in these rules,

   264 specifying the formula to duplicate.

   265 See theory {\tt Sequents/LK0} in the sources for complete listings of

   266 the rules and derived rules.

   267

   268 To support the simplifier, hundreds of equivalences are proved for

   269 the logical connectives and for if-then-else expressions.  See the file

   270 \texttt{Sequents/simpdata.ML}.

   271

   272 \section{Automatic Proof}

   273

   274 LK instantiates Isabelle's simplifier.  Both equality ($=$) and the

   275 biconditional ($\bimp$) may be used for rewriting.  The tactic

   276 \texttt{Simp_tac} refers to the default simpset (\texttt{simpset()}).  With

   277 sequents, the \texttt{full_} and \texttt{asm_} forms of the simplifier are not

   278 required; all the formulae{} in the sequent will be simplified.  The left-hand

   279 formulae{} are taken as rewrite rules.  (Thus, the behaviour is what you would

   280 normally expect from calling \texttt{Asm_full_simp_tac}.)

   281

   282 For classical reasoning, several tactics are available:

   283 \begin{ttbox}

   284 Safe_tac : int -> tactic

   285 Step_tac : int -> tactic

   286 Fast_tac : int -> tactic

   287 Best_tac : int -> tactic

   288 Pc_tac   : int -> tactic

   289 \end{ttbox}

   290 These refer not to the standard classical reasoner but to a separate one

   291 provided for the sequent calculus.  Two commands are available for adding new

sequent calculus rules, safe or unsafe, to the default ``theorem pack'':

   293 \begin{ttbox}

   294 Add_safes   : thm list -> unit

   295 Add_unsafes : thm list -> unit

   296 \end{ttbox}

   297 To control the set of rules for individual invocations, lower-case versions of

   298 all these primitives are available.  Sections~\ref{sec:thm-pack}

   299 and~\ref{sec:sequent-provers} give full details.

   300

   301

   302 \section{Tactics for the cut rule}

   303

   304 According to the cut-elimination theorem, the cut rule can be eliminated

   305 from proofs of sequents.  But the rule is still essential.  It can be used

   306 to structure a proof into lemmas, avoiding repeated proofs of the same

   307 formula.  More importantly, the cut rule cannot be eliminated from

   308 derivations of rules.  For example, there is a trivial cut-free proof of

the sequent $P\conj Q\turn Q\conj P$.

   310 Noting this, we might want to derive a rule for swapping the conjuncts

   311 in a right-hand formula:

\[ \frac{\Gamma\turn \Delta, P\conj Q}{\Gamma\turn \Delta, Q\conj P} \]

The cut rule must be used, for $P\conj Q$ is not a subformula of $Q\conj
P$.  Most cuts directly involve a premise of the rule being derived (a

   315 meta-assumption).  In a few cases, the cut formula is not part of any

   316 premise, but serves as a bridge between the premises and the conclusion.

   317 In such proofs, the cut formula is specified by calling an appropriate

   318 tactic.

   319

   320 \begin{ttbox}

   321 cutR_tac : string -> int -> tactic

   322 cutL_tac : string -> int -> tactic

   323 \end{ttbox}

   324 These tactics refine a subgoal into two by applying the cut rule.  The cut

   325 formula is given as a string, and replaces some other formula in the sequent.

   326 \begin{ttdescription}

   327 \item[\ttindexbold{cutR_tac} {\it P\/} {\it i}] reads an LK formula~$P$, and

   328   applies the cut rule to subgoal~$i$.  It then deletes some formula from the

   329   right side of subgoal~$i$, replacing that formula by~$P$.

   330

   331 \item[\ttindexbold{cutL_tac} {\it P\/} {\it i}] reads an LK formula~$P$, and

   332   applies the cut rule to subgoal~$i$.  It then deletes some formula from the

   333   left side of the new subgoal $i+1$, replacing that formula by~$P$.

   334 \end{ttdescription}

   335 All the structural rules --- cut, contraction, and thinning --- can be

   336 applied to particular formulae using {\tt res_inst_tac}.

   337

   338

   339 \section{Tactics for sequents}

   340 \begin{ttbox}

   341 forms_of_seq       : term -> term list

   342 could_res          : term * term -> bool

   343 could_resolve_seq  : term * term -> bool

   344 filseq_resolve_tac : thm list -> int -> int -> tactic

   345 \end{ttbox}

   346 Associative unification is not as efficient as it might be, in part because

   347 the representation of lists defeats some of Isabelle's internal

   348 optimisations.  The following operations implement faster rule application,

   349 and may have other uses.

   350 \begin{ttdescription}

   351 \item[\ttindexbold{forms_of_seq} {\it t}]

   352 returns the list of all formulae in the sequent~$t$, removing sequence

   353 variables.

   354

   355 \item[\ttindexbold{could_res} ($t$,$u$)]

   356 tests whether two formula lists could be resolved.  List $t$ is from a

   357 premise or subgoal, while $u$ is from the conclusion of an object-rule.

   358 Assuming that each formula in $u$ is surrounded by sequence variables, it

   359 checks that each conclusion formula is unifiable (using {\tt could_unify})

   360 with some subgoal formula.

   361

   362 \item[\ttindexbold{could_resolve_seq} ($t$,$u$)]

   363   tests whether two sequents could be resolved.  Sequent $t$ is a premise

   364   or subgoal, while $u$ is the conclusion of an object-rule.  It simply

   365   calls {\tt could_res} twice to check that both the left and the right

   366   sides of the sequents are compatible.

   367

   368 \item[\ttindexbold{filseq_resolve_tac} {\it thms} {\it maxr} {\it i}]

   369 uses {\tt filter_thms could_resolve} to extract the {\it thms} that are

   370 applicable to subgoal~$i$.  If more than {\it maxr\/} theorems are

   371 applicable then the tactic fails.  Otherwise it calls {\tt resolve_tac}.

   372 Thus, it is the sequent calculus analogue of \ttindex{filt_resolve_tac}.

   373 \end{ttdescription}

   374

   375

   376 \section{A simple example of classical reasoning}

   377 The theorem $\turn\ex{y}\all{x}P(y)\imp P(x)$ is a standard example of the

   378 classical treatment of the existential quantifier.  Classical reasoning is

   379 easy using~LK, as you can see by comparing this proof with the one given in

   380 the FOL manual~\cite{isabelle-ZF}.  From a logical point of view, the proofs

   381 are essentially the same; the key step here is to use \tdx{exR} rather than

   382 the weaker~\tdx{exR_thin}.

   383 \begin{ttbox}

   384 Goal "|- EX y. ALL x. P(y)-->P(x)";

   385 {\out Level 0}

   386 {\out  |- EX y. ALL x. P(y) --> P(x)}

   387 {\out  1.  |- EX y. ALL x. P(y) --> P(x)}

   388 by (resolve_tac [exR] 1);

   389 {\out Level 1}

   390 {\out  |- EX y. ALL x. P(y) --> P(x)}

   391 {\out  1.  |- ALL x. P(?x) --> P(x), EX x. ALL xa. P(x) --> P(xa)}

   392 \end{ttbox}

   393 There are now two formulae on the right side.  Keeping the existential one

   394 in reserve, we break down the universal one.

   395 \begin{ttbox}

   396 by (resolve_tac [allR] 1);

   397 {\out Level 2}

   398 {\out  |- EX y. ALL x. P(y) --> P(x)}

   399 {\out  1. !!x.  |- P(?x) --> P(x), EX x. ALL xa. P(x) --> P(xa)}

   400 by (resolve_tac [impR] 1);

   401 {\out Level 3}

   402 {\out  |- EX y. ALL x. P(y) --> P(x)}

   403 {\out  1. !!x. P(?x) |- P(x), EX x. ALL xa. P(x) --> P(xa)}

   404 \end{ttbox}

   405 Because LK is a sequent calculus, the formula~$P(\Var{x})$ does not become an

   406 assumption; instead, it moves to the left side.  The resulting subgoal cannot

   407 be instantiated to a basic sequent: the bound variable~$x$ is not unifiable

   408 with the unknown~$\Var{x}$.

   409 \begin{ttbox}

   410 by (resolve_tac [basic] 1);

   411 {\out by: tactic failed}

   412 \end{ttbox}

   413 We reuse the existential formula using~\tdx{exR_thin}, which discards

   414 it; we shall not need it a third time.  We again break down the resulting

   415 formula.

   416 \begin{ttbox}

   417 by (resolve_tac [exR_thin] 1);

   418 {\out Level 4}

   419 {\out  |- EX y. ALL x. P(y) --> P(x)}

   420 {\out  1. !!x. P(?x) |- P(x), ALL xa. P(?x7(x)) --> P(xa)}

   421 by (resolve_tac [allR] 1);

   422 {\out Level 5}

   423 {\out  |- EX y. ALL x. P(y) --> P(x)}

   424 {\out  1. !!x xa. P(?x) |- P(x), P(?x7(x)) --> P(xa)}

   425 by (resolve_tac [impR] 1);

   426 {\out Level 6}

   427 {\out  |- EX y. ALL x. P(y) --> P(x)}

   428 {\out  1. !!x xa. P(?x), P(?x7(x)) |- P(x), P(xa)}

   429 \end{ttbox}

   430 Subgoal~1 seems to offer lots of possibilities.  Actually the only useful

   431 step is instantiating~$\Var{x@7}$ to $\lambda x. x$,

   432 transforming~$\Var{x@7}(x)$ into~$x$.

   433 \begin{ttbox}

   434 by (resolve_tac [basic] 1);

   435 {\out Level 7}

   436 {\out  |- EX y. ALL x. P(y) --> P(x)}

   437 {\out No subgoals!}

   438 \end{ttbox}

   439 This theorem can be proved automatically.  Because it involves quantifier

   440 duplication, we employ best-first search:

   441 \begin{ttbox}

   442 Goal "|- EX y. ALL x. P(y)-->P(x)";

   443 {\out Level 0}

   444 {\out  |- EX y. ALL x. P(y) --> P(x)}

   445 {\out  1.  |- EX y. ALL x. P(y) --> P(x)}

   446 by (best_tac LK_dup_pack 1);

   447 {\out Level 1}

   448 {\out  |- EX y. ALL x. P(y) --> P(x)}

   449 {\out No subgoals!}

   450 \end{ttbox}

   451

   452

   453

   454 \section{A more complex proof}

   455 Many of Pelletier's test problems for theorem provers \cite{pelletier86}

   456 can be solved automatically.  Problem~39 concerns set theory, asserting

   457 that there is no Russell set --- a set consisting of those sets that are

   458 not members of themselves:

   459 $\turn \neg (\exists x. \forall y. y\in x \bimp y\not\in y)$

   460 This does not require special properties of membership; we may generalize

   461 $x\in y$ to an arbitrary predicate~$F(x,y)$.  The theorem, which is trivial

   462 for \texttt{Fast_tac}, has a short manual proof.  See the directory {\tt

   463   Sequents/LK} for many more examples.

   464

   465 We set the main goal and move the negated formula to the left.

   466 \begin{ttbox}

   467 Goal "|- ~ (EX x. ALL y. F(y,x) <-> ~F(y,y))";

   468 {\out Level 0}

   469 {\out  |- ~ (EX x. ALL y. F(y,x) <-> ~ F(y,y))}

   470 {\out  1.  |- ~ (EX x. ALL y. F(y,x) <-> ~ F(y,y))}

   471 by (resolve_tac [notR] 1);

   472 {\out Level 1}

   473 {\out  |- ~ (EX x. ALL y. F(y,x) <-> ~ F(y,y))}

   474 {\out  1. EX x. ALL y. F(y,x) <-> ~ F(y,y) |-}

   475 \end{ttbox}

   476 The right side is empty; we strip both quantifiers from the formula on the

   477 left.

   478 \begin{ttbox}

   479 by (resolve_tac [exL] 1);

   480 {\out Level 2}

   481 {\out  |- ~ (EX x. ALL y. F(y,x) <-> ~ F(y,y))}

   482 {\out  1. !!x. ALL y. F(y,x) <-> ~ F(y,y) |-}

   483 by (resolve_tac [allL_thin] 1);

   484 {\out Level 3}

   485 {\out  |- ~ (EX x. ALL y. F(y,x) <-> ~ F(y,y))}

   486 {\out  1. !!x. F(?x2(x),x) <-> ~ F(?x2(x),?x2(x)) |-}

   487 \end{ttbox}

   488 The rule \tdx{iffL} says, if $P\bimp Q$ then $P$ and~$Q$ are either

   489 both true or both false.  It yields two subgoals.

   490 \begin{ttbox}

   491 by (resolve_tac [iffL] 1);

   492 {\out Level 4}

   493 {\out  |- ~ (EX x. ALL y. F(y,x) <-> ~ F(y,y))}

   494 {\out  1. !!x.  |- F(?x2(x),x), ~ F(?x2(x),?x2(x))}

   495 {\out  2. !!x. ~ F(?x2(x),?x2(x)), F(?x2(x),x) |-}

   496 \end{ttbox}

   497 We must instantiate~$\Var{x@2}$, the shared unknown, to satisfy both

   498 subgoals.  Beginning with subgoal~2, we move a negated formula to the left

   499 and create a basic sequent.

   500 \begin{ttbox}

   501 by (resolve_tac [notL] 2);

   502 {\out Level 5}

   503 {\out  |- ~ (EX x. ALL y. F(y,x) <-> ~ F(y,y))}

   504 {\out  1. !!x.  |- F(?x2(x),x), ~ F(?x2(x),?x2(x))}

   505 {\out  2. !!x. F(?x2(x),x) |- F(?x2(x),?x2(x))}

   506 by (resolve_tac [basic] 2);

   507 {\out Level 6}

   508 {\out  |- ~ (EX x. ALL y. F(y,x) <-> ~ F(y,y))}

   509 {\out  1. !!x.  |- F(x,x), ~ F(x,x)}

   510 \end{ttbox}

   511 Thanks to the instantiation of~$\Var{x@2}$, subgoal~1 is obviously true.

   512 \begin{ttbox}

   513 by (resolve_tac [notR] 1);

   514 {\out Level 7}

   515 {\out  |- ~ (EX x. ALL y. F(y,x) <-> ~ F(y,y))}

   516 {\out  1. !!x. F(x,x) |- F(x,x)}

   517 by (resolve_tac [basic] 1);

   518 {\out Level 8}

   519 {\out  |- ~ (EX x. ALL y. F(y,x) <-> ~ F(y,y))}

   520 {\out No subgoals!}

   521 \end{ttbox}

   522

   523 \section{*Unification for lists}\label{sec:assoc-unification}

   524

   525 Higher-order unification includes associative unification as a special

   526 case, by an encoding that involves function composition

   527 \cite[page~37]{huet78}.  To represent lists, let $C$ be a new constant.

   528 The empty list is $\lambda x. x$, while $[t@1,t@2,\ldots,t@n]$ is

   529 represented by

   530 $\lambda x. C(t@1,C(t@2,\ldots,C(t@n,x))).$

   531 The unifiers of this with $\lambda x.\Var{f}(\Var{g}(x))$ give all the ways

   532 of expressing $[t@1,t@2,\ldots,t@n]$ as the concatenation of two lists.

   533

   534 Unlike orthodox associative unification, this technique can represent certain

   535 infinite sets of unifiers by flex-flex equations.   But note that the term

   536 $\lambda x. C(t,\Var{a})$ does not represent any list.  Flex-flex constraints

   537 containing such garbage terms may accumulate during a proof.

   538 \index{flex-flex constraints}

   539

   540 This technique lets Isabelle formalize sequent calculus rules,

   541 where the comma is the associative operator:

\[ \infer[(\conj\hbox{-left})]
         {\Gamma,P\conj Q,\Delta \turn \Theta}
         {\Gamma,P,Q,\Delta \turn \Theta}
\]

   545 Multiple unifiers occur whenever this is resolved against a goal containing

   546 more than one conjunction on the left.

   547

   548 LK exploits this representation of lists.  As an alternative, the sequent

   549 calculus can be formalized using an ordinary representation of lists, with a

   550 logic program for removing a formula from a list.  Amy Felty has applied this

   551 technique using the language $\lambda$Prolog~\cite{felty91a}.

   552

   553 Explicit formalization of sequents can be tiresome.  But it gives precise

   554 control over contraction and weakening, and is essential to handle relevant

   555 and linear logics.

   556

   557

   558 \section{*Packaging sequent rules}\label{sec:thm-pack}

   559

   560 The sequent calculi come with simple proof procedures.  These are incomplete

   561 but are reasonably powerful for interactive use.  They expect rules to be

   562 classified as \textbf{safe} or \textbf{unsafe}.  A rule is safe if applying it to a

   563 provable goal always yields provable subgoals.  If a rule is safe then it can

   564 be applied automatically to a goal without destroying our chances of finding a

   565 proof.  For instance, all the standard rules of the classical sequent calculus

   566 {\sc lk} are safe.  An unsafe rule may render the goal unprovable; typical

   567 examples are the weakened quantifier rules {\tt allL_thin} and {\tt exR_thin}.

   568

   569 Proof procedures use safe rules whenever possible, using an unsafe rule as a

   570 last resort.  Those safe rules are preferred that generate the fewest

   571 subgoals.  Safe rules are (by definition) deterministic, while the unsafe

   572 rules require a search strategy, such as backtracking.

   573

   574 A \textbf{pack} is a pair whose first component is a list of safe rules and

   575 whose second is a list of unsafe rules.  Packs can be extended in an obvious

   576 way to allow reasoning with various collections of rules.  For clarity, LK

declares \mltydx{pack} as an \ML{} datatype, although it is essentially a type

   578 synonym:

   579 \begin{ttbox}

   580 datatype pack = Pack of thm list * thm list;

   581 \end{ttbox}

   582 Pattern-matching using constructor {\tt Pack} can inspect a pack's

   583 contents.  Packs support the following operations:

   584 \begin{ttbox}

   585 pack        : unit -> pack

   586 pack_of     : theory -> pack

   587 empty_pack  : pack

   588 prop_pack   : pack

   589 LK_pack     : pack

   590 LK_dup_pack : pack

   591 add_safes   : pack * thm list -> pack               \hfill\textbf{infix 4}

   592 add_unsafes : pack * thm list -> pack               \hfill\textbf{infix 4}

   593 \end{ttbox}

   594 \begin{ttdescription}

   595 \item[\ttindexbold{pack}] returns the pack attached to the current theory.

   596

   597 \item[\ttindexbold{pack_of $thy$}] returns the pack attached to theory $thy$.

   598

   599 \item[\ttindexbold{empty_pack}] is the empty pack.

   600

   601 \item[\ttindexbold{prop_pack}] contains the propositional rules, namely

   602 those for $\conj$, $\disj$, $\neg$, $\imp$ and~$\bimp$, along with the

   603 rules {\tt basic} and {\tt refl}.  These are all safe.

   604

   605 \item[\ttindexbold{LK_pack}]

   606 extends {\tt prop_pack} with the safe rules {\tt allR}

   607 and~{\tt exL} and the unsafe rules {\tt allL_thin} and

   608 {\tt exR_thin}.  Search using this is incomplete since quantified

   609 formulae are used at most once.

   610

   611 \item[\ttindexbold{LK_dup_pack}]

   612 extends {\tt prop_pack} with the safe rules {\tt allR}

   613 and~{\tt exL} and the unsafe rules \tdx{allL} and~\tdx{exR}.

   614 Search using this is complete, since quantified formulae may be reused, but

   615 frequently fails to terminate.  It is generally unsuitable for depth-first

   616 search.

   617

   618 \item[$pack$ \ttindexbold{add_safes} $rules$]

   619 adds some safe~$rules$ to the pack~$pack$.

   620

   621 \item[$pack$ \ttindexbold{add_unsafes} $rules$]

   622 adds some unsafe~$rules$ to the pack~$pack$.

   623 \end{ttdescription}

   624

   625

   626 \section{*Proof procedures}\label{sec:sequent-provers}

   627

   628 The LK proof procedure is similar to the classical reasoner described in

   629 \iflabelundefined{chap:classical}{the {\em Reference Manual\/}}%

   630             {Chap.\ts\ref{chap:classical}}.

   631 %

   632 In fact it is simpler, since it works directly with sequents rather than

   633 simulating them.  There is no need to distinguish introduction rules from

   634 elimination rules, and of course there is no swap rule.  As always,

   635 Isabelle's classical proof procedures are less powerful than resolution

   636 theorem provers.  But they are more natural and flexible, working with an

   637 open-ended set of rules.

   638

   639 Backtracking over the choice of a safe rule accomplishes nothing: applying

   640 them in any order leads to essentially the same result.  Backtracking may

   641 be necessary over basic sequents when they perform unification.  Suppose

   642 that~0, 1, 2,~3 are constants in the subgoals

\[ \begin{array}{c}
P(0), P(1), P(2) \turn P(\Var{a}) \\
P(0), P(2), P(3) \turn P(\Var{a}) \\
P(1), P(3), P(2) \turn P(\Var{a})
\end{array}
\]

   649 The only assignment that satisfies all three subgoals is $\Var{a}\mapsto 2$,

   650 and this can only be discovered by search.  The tactics given below permit

   651 backtracking only over axioms, such as {\tt basic} and {\tt refl};

   652 otherwise they are deterministic.

   653

   654

   655 \subsection{Method A}

   656 \begin{ttbox}

   657 reresolve_tac   : thm list -> int -> tactic

   658 repeat_goal_tac : pack -> int -> tactic

   659 pc_tac          : pack -> int -> tactic

   660 \end{ttbox}

   661 These tactics use a method developed by Philippe de Groote.  A subgoal is

   662 refined and the resulting subgoals are attempted in reverse order.  For

   663 some reason, this is much faster than attempting the subgoals in order.

   664 The method is inherently depth-first.

   665

   666 At present, these tactics only work for rules that have no more than two

   667 premises.  They fail --- return no next state --- if they can do nothing.

   668 \begin{ttdescription}

   669 \item[\ttindexbold{reresolve_tac} $thms$ $i$]

   670 repeatedly applies the $thms$ to subgoal $i$ and the resulting subgoals.

   671

   672 \item[\ttindexbold{repeat_goal_tac} $pack$ $i$]

   673 applies the safe rules in the pack to a goal and the resulting subgoals.

   674 If no safe rule is applicable then it applies an unsafe rule and continues.

   675

   676 \item[\ttindexbold{pc_tac} $pack$ $i$]

   677 applies {\tt repeat_goal_tac} using depth-first search to solve subgoal~$i$.

   678 \end{ttdescription}

   679

   680

   681 \subsection{Method B}

   682 \begin{ttbox}

   683 safe_tac : pack -> int -> tactic

   684 step_tac : pack -> int -> tactic

   685 fast_tac : pack -> int -> tactic

   686 best_tac : pack -> int -> tactic

   687 \end{ttbox}

   688 These tactics are analogous to those of the generic classical

reasoner.  They use `Method~A' only on safe rules.  They fail if they

   690 can do nothing.

   691 \begin{ttdescription}

   692 \item[\ttindexbold{safe_goal_tac} $pack$ $i$]

   693 applies the safe rules in the pack to a goal and the resulting subgoals.

   694 It ignores the unsafe rules.

   695

   696 \item[\ttindexbold{step_tac} $pack$ $i$]

   697 either applies safe rules (using {\tt safe_goal_tac}) or applies one unsafe

   698 rule.

   699

   700 \item[\ttindexbold{fast_tac} $pack$ $i$]

   701 applies {\tt step_tac} using depth-first search to solve subgoal~$i$.

   702 Despite its name, it is frequently slower than {\tt pc_tac}.

   703

   704 \item[\ttindexbold{best_tac} $pack$ $i$]

   705 applies {\tt step_tac} using best-first search to solve subgoal~$i$.  It is

   706 particularly useful for quantifier duplication (using \ttindex{LK_dup_pack}).

   707 \end{ttdescription}

   708

   709

   710

   711 \index{sequent calculus|)}
`