removal of FOL, ZF to a separate manual
author paulson
Fri, 08 Jan 1999 14:02:04 +0100
changeset 6072 5583261db33d
parent 6071 1b2392ac5752
child 6073 fba734ba6894
removal of FOL, ZF to a separate manual
doc-src/Logics/CTT.tex
doc-src/Logics/FOL-eg.txt
doc-src/Logics/FOL.tex
doc-src/Logics/HOL.tex
doc-src/Logics/LK.tex
doc-src/Logics/Makefile
doc-src/Logics/ZF-eg.txt
doc-src/Logics/ZF-rules.txt
doc-src/Logics/ZF.tex
doc-src/Logics/intro.tex
doc-src/Logics/logics.bbl
doc-src/Logics/logics.ind
doc-src/Logics/logics.tex
--- a/doc-src/Logics/CTT.tex	Fri Jan 08 13:20:59 1999 +0100
+++ b/doc-src/Logics/CTT.tex	Fri Jan 08 14:02:04 1999 +0100
@@ -178,10 +178,10 @@
 the one-element type is $T$; other finite types are built as $T+T+T$, etc.
 
 \index{*SUM symbol}\index{*PROD symbol}
-Quantification is expressed using general sums $\sum@{x\in A}B[x]$ and
+Quantification is expressed by sums $\sum@{x\in A}B[x]$ and
 products $\prod@{x\in A}B[x]$.  Instead of {\tt Sum($A$,$B$)} and {\tt
-  Prod($A$,$B$)} we may write \hbox{\tt SUM $x$:$A$. $B[x]$} and \hbox{\tt
-  PROD $x$:$A$. $B[x]$}.  For example, we may write
+  Prod($A$,$B$)} we may write \hbox{\tt SUM $x$:$A$.\ $B[x]$} and \hbox{\tt
+  PROD $x$:$A$.\ $B[x]$}.  For example, we may write
 \begin{ttbox}
 SUM y:B. PROD x:A. C(x,y)   {\rm for}   Sum(B, \%y. Prod(A, \%x. C(x,y)))
 \end{ttbox}
--- a/doc-src/Logics/FOL-eg.txt	Fri Jan 08 13:20:59 1999 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,245 +0,0 @@
-(**** FOL examples ****)
-
-Pretty.setmargin 72;  (*existing macros just allow this margin*)
-print_depth 0;
-
-(*** Intuitionistic examples ***)
-
-context IFOL.thy;
-
-(*Quantifier example from Logic&Computation*)
-Goal "(EX y. ALL x. Q(x,y)) -->  (ALL x. EX y. Q(x,y))";
-by (resolve_tac [impI] 1);
-by (resolve_tac [allI] 1);
-by (resolve_tac [exI] 1);
-by (eresolve_tac [exE] 1);
-choplev 2;
-by (eresolve_tac [exE] 1);
-by (resolve_tac [exI] 1);
-by (eresolve_tac [allE] 1);
-by (assume_tac 1);
-
-
-(*Example of Dyckhoff's method*)
-Goalw [not_def] "~ ~ ((P-->Q) | (Q-->P))";
-by (resolve_tac [impI] 1);
-by (eresolve_tac [disj_impE] 1);
-by (eresolve_tac [imp_impE] 1);
-by (eresolve_tac [imp_impE] 1);
-by (REPEAT (eresolve_tac [FalseE] 2));
-by (assume_tac 1);
-
-
-
-
-
-(*** Classical examples ***)
-
-context FOL.thy;
-
-Goal "EX y. ALL x. P(y)-->P(x)";
-by (resolve_tac [exCI] 1);
-by (resolve_tac [allI] 1);
-by (resolve_tac [impI] 1);
-by (eresolve_tac [allE] 1);
-prth (allI RSN (2,swap));
-by (eresolve_tac [it] 1);
-by (resolve_tac [impI] 1);
-by (eresolve_tac [notE] 1);
-by (assume_tac 1);
-Goal "EX y. ALL x. P(y)-->P(x)";
-by (Blast_tac 1);
-
-
-
-- Goal "EX y. ALL x. P(y)-->P(x)";
-Level 0
-EX y. ALL x. P(y) --> P(x)
- 1. EX y. ALL x. P(y) --> P(x)
-- by (resolve_tac [exCI] 1);
-Level 1
-EX y. ALL x. P(y) --> P(x)
- 1. ALL y. ~(ALL x. P(y) --> P(x)) ==> ALL x. P(?a) --> P(x)
-- by (resolve_tac [allI] 1);
-Level 2
-EX y. ALL x. P(y) --> P(x)
- 1. !!x. ALL y. ~(ALL x. P(y) --> P(x)) ==> P(?a) --> P(x)
-- by (resolve_tac [impI] 1);
-Level 3
-EX y. ALL x. P(y) --> P(x)
- 1. !!x. [| ALL y. ~(ALL x. P(y) --> P(x)); P(?a) |] ==> P(x)
-- by (eresolve_tac [allE] 1);
-Level 4
-EX y. ALL x. P(y) --> P(x)
- 1. !!x. [| P(?a); ~(ALL xa. P(?y3(x)) --> P(xa)) |] ==> P(x)
-- prth (allI RSN (2,swap));
-[| ~(ALL x. ?P1(x)); !!x. ~?Q ==> ?P1(x) |] ==> ?Q
-- by (eresolve_tac [it] 1);
-Level 5
-EX y. ALL x. P(y) --> P(x)
- 1. !!x xa. [| P(?a); ~P(x) |] ==> P(?y3(x)) --> P(xa)
-- by (resolve_tac [impI] 1);
-Level 6
-EX y. ALL x. P(y) --> P(x)
- 1. !!x xa. [| P(?a); ~P(x); P(?y3(x)) |] ==> P(xa)
-- by (eresolve_tac [notE] 1);
-Level 7
-EX y. ALL x. P(y) --> P(x)
- 1. !!x xa. [| P(?a); P(?y3(x)) |] ==> P(x)
-- by (assume_tac 1);
-Level 8
-EX y. ALL x. P(y) --> P(x)
-No subgoals!
-- Goal "EX y. ALL x. P(y)-->P(x)";
-Level 0
-EX y. ALL x. P(y) --> P(x)
- 1. EX y. ALL x. P(y) --> P(x)
-- by (best_tac FOL_dup_cs 1);
-Level 1
-EX y. ALL x. P(y) --> P(x)
-No subgoals!
-
-
-(**** finally, the example FOL/ex/if.ML ****)
-
-> val prems = goalw if_thy [if_def]
-#    "[| P ==> Q; ~P ==> R |] ==> if(P,Q,R)";
-Level 0
-if(P,Q,R)
- 1. P & Q | ~P & R
-> by (Classical.fast_tac (FOL_cs addIs prems) 1);
-Level 1
-if(P,Q,R)
-No subgoals!
-> val ifI = result();
-
-
-> val major::prems = goalw if_thy [if_def]
-#    "[| if(P,Q,R);  [| P; Q |] ==> S; [| ~P; R |] ==> S |] ==> S";
-Level 0
-S
- 1. S
-> by (cut_facts_tac [major] 1);
-Level 1
-S
- 1. P & Q | ~P & R ==> S
-> by (Classical.fast_tac (FOL_cs addIs prems) 1);
-Level 2
-S
-No subgoals!
-> val ifE = result();
-
-> goal if_thy "if(P, if(Q,A,B), if(Q,C,D)) <-> if(Q, if(P,A,C), if(P,B,D))";
-Level 0
-if(P,if(Q,A,B),if(Q,C,D)) <-> if(Q,if(P,A,C),if(P,B,D))
- 1. if(P,if(Q,A,B),if(Q,C,D)) <-> if(Q,if(P,A,C),if(P,B,D))
-> by (resolve_tac [iffI] 1);
-Level 1
-if(P,if(Q,A,B),if(Q,C,D)) <-> if(Q,if(P,A,C),if(P,B,D))
- 1. if(P,if(Q,A,B),if(Q,C,D)) ==> if(Q,if(P,A,C),if(P,B,D))
- 2. if(Q,if(P,A,C),if(P,B,D)) ==> if(P,if(Q,A,B),if(Q,C,D))
-> by (eresolve_tac [ifE] 1);
-Level 2
-if(P,if(Q,A,B),if(Q,C,D)) <-> if(Q,if(P,A,C),if(P,B,D))
- 1. [| P; if(Q,A,B) |] ==> if(Q,if(P,A,C),if(P,B,D))
- 2. [| ~P; if(Q,C,D) |] ==> if(Q,if(P,A,C),if(P,B,D))
- 3. if(Q,if(P,A,C),if(P,B,D)) ==> if(P,if(Q,A,B),if(Q,C,D))
-> by (eresolve_tac [ifE] 1);
-Level 3
-if(P,if(Q,A,B),if(Q,C,D)) <-> if(Q,if(P,A,C),if(P,B,D))
- 1. [| P; Q; A |] ==> if(Q,if(P,A,C),if(P,B,D))
- 2. [| P; ~Q; B |] ==> if(Q,if(P,A,C),if(P,B,D))
- 3. [| ~P; if(Q,C,D) |] ==> if(Q,if(P,A,C),if(P,B,D))
- 4. if(Q,if(P,A,C),if(P,B,D)) ==> if(P,if(Q,A,B),if(Q,C,D))
-> by (resolve_tac [ifI] 1);
-Level 4
-if(P,if(Q,A,B),if(Q,C,D)) <-> if(Q,if(P,A,C),if(P,B,D))
- 1. [| P; Q; A; Q |] ==> if(P,A,C)
- 2. [| P; Q; A; ~Q |] ==> if(P,B,D)
- 3. [| P; ~Q; B |] ==> if(Q,if(P,A,C),if(P,B,D))
- 4. [| ~P; if(Q,C,D) |] ==> if(Q,if(P,A,C),if(P,B,D))
- 5. if(Q,if(P,A,C),if(P,B,D)) ==> if(P,if(Q,A,B),if(Q,C,D))
-> by (resolve_tac [ifI] 1);
-Level 5
-if(P,if(Q,A,B),if(Q,C,D)) <-> if(Q,if(P,A,C),if(P,B,D))
- 1. [| P; Q; A; Q; P |] ==> A
- 2. [| P; Q; A; Q; ~P |] ==> C
- 3. [| P; Q; A; ~Q |] ==> if(P,B,D)
- 4. [| P; ~Q; B |] ==> if(Q,if(P,A,C),if(P,B,D))
- 5. [| ~P; if(Q,C,D) |] ==> if(Q,if(P,A,C),if(P,B,D))
- 6. if(Q,if(P,A,C),if(P,B,D)) ==> if(P,if(Q,A,B),if(Q,C,D))
-
-> choplev 0;
-Level 0
-if(P,if(Q,A,B),if(Q,C,D)) <-> if(Q,if(P,A,C),if(P,B,D))
- 1. if(P,if(Q,A,B),if(Q,C,D)) <-> if(Q,if(P,A,C),if(P,B,D))
-> val if_cs = FOL_cs addSIs [ifI] addSEs[ifE];
-> by (Classical.fast_tac if_cs 1);
-Level 1
-if(P,if(Q,A,B),if(Q,C,D)) <-> if(Q,if(P,A,C),if(P,B,D))
-No subgoals!
-> val if_commute = result();
-
-> goal if_thy "if(if(P,Q,R), A, B) <-> if(P, if(Q,A,B), if(R,A,B))";
-Level 0
-if(if(P,Q,R),A,B) <-> if(P,if(Q,A,B),if(R,A,B))
- 1. if(if(P,Q,R),A,B) <-> if(P,if(Q,A,B),if(R,A,B))
-> by (Classical.fast_tac if_cs 1);
-Level 1
-if(if(P,Q,R),A,B) <-> if(P,if(Q,A,B),if(R,A,B))
-No subgoals!
-> val nested_ifs = result();
-
-
-> choplev 0;
-Level 0
-if(if(P,Q,R),A,B) <-> if(P,if(Q,A,B),if(R,A,B))
- 1. if(if(P,Q,R),A,B) <-> if(P,if(Q,A,B),if(R,A,B))
-> by (rewrite_goals_tac [if_def]);
-Level 1
-if(if(P,Q,R),A,B) <-> if(P,if(Q,A,B),if(R,A,B))
- 1. (P & Q | ~P & R) & A | ~(P & Q | ~P & R) & B <->
-    P & (Q & A | ~Q & B) | ~P & (R & A | ~R & B)
-> by (Classical.fast_tac FOL_cs 1);
-Level 2
-if(if(P,Q,R),A,B) <-> if(P,if(Q,A,B),if(R,A,B))
-No subgoals!
-
-
-> goal if_thy "if(if(P,Q,R), A, B) <-> if(P, if(Q,A,B), if(R,B,A))";
-Level 0
-if(if(P,Q,R),A,B) <-> if(P,if(Q,A,B),if(R,B,A))
- 1. if(if(P,Q,R),A,B) <-> if(P,if(Q,A,B),if(R,B,A))
-> by (REPEAT (Classical.step_tac if_cs 1));
-Level 1
-if(if(P,Q,R),A,B) <-> if(P,if(Q,A,B),if(R,B,A))
- 1. [| A; ~P; R; ~P; R |] ==> B
- 2. [| B; ~P; ~R; ~P; ~R |] ==> A
- 3. [| ~P; R; B; ~P; R |] ==> A
- 4. [| ~P; ~R; A; ~B; ~P |] ==> R
-
-> choplev 0;
-Level 0
-if(if(P,Q,R),A,B) <-> if(P,if(Q,A,B),if(R,B,A))
- 1. if(if(P,Q,R),A,B) <-> if(P,if(Q,A,B),if(R,B,A))
-> by (rewrite_goals_tac [if_def]);
-Level 1
-if(if(P,Q,R),A,B) <-> if(P,if(Q,A,B),if(R,B,A))
- 1. (P & Q | ~P & R) & A | ~(P & Q | ~P & R) & B <->
-    P & (Q & A | ~Q & B) | ~P & (R & B | ~R & A)
-> by (Classical.fast_tac FOL_cs 1);
-by: tactic failed
-Exception- ERROR raised
-Exception failure raised
-
-> by (REPEAT (Classical.step_tac FOL_cs 1));
-Level 2
-if(if(P,Q,R),A,B) <-> if(P,if(Q,A,B),if(R,B,A))
- 1. [| A; ~P; R; ~P; R; ~False |] ==> B
- 2. [| A; ~P; R; R; ~False; ~B; ~B |] ==> Q
- 3. [| B; ~P; ~R; ~P; ~A |] ==> R
- 4. [| B; ~P; ~R; ~Q; ~A |] ==> R
- 5. [| B; ~R; ~P; ~A; ~R; Q; ~False |] ==> A
- 6. [| ~P; R; B; ~P; R; ~False |] ==> A
- 7. [| ~P; ~R; A; ~B; ~R |] ==> P
- 8. [| ~P; ~R; A; ~B; ~R |] ==> Q
--- a/doc-src/Logics/FOL.tex	Fri Jan 08 13:20:59 1999 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,937 +0,0 @@
-%% $Id$
-\chapter{First-Order Logic}
-\index{first-order logic|(}
-
-Isabelle implements Gentzen's natural deduction systems {\sc nj} and {\sc
-  nk}.  Intuitionistic first-order logic is defined first, as theory
-\thydx{IFOL}.  Classical logic, theory \thydx{FOL}, is
-obtained by adding the double negation rule.  Basic proof procedures are
-provided.  The intuitionistic prover works with derived rules to simplify
-implications in the assumptions.  Classical~\texttt{FOL} employs Isabelle's
-classical reasoner, which simulates a sequent calculus.
-
-\section{Syntax and rules of inference}
-The logic is many-sorted, using Isabelle's type classes.  The class of
-first-order terms is called \cldx{term} and is a subclass of \texttt{logic}.
-No types of individuals are provided, but extensions can define types such
-as \texttt{nat::term} and type constructors such as \texttt{list::(term)term}
-(see the examples directory, \texttt{FOL/ex}).  Below, the type variable
-$\alpha$ ranges over class \texttt{term}; the equality symbol and quantifiers
-are polymorphic (many-sorted).  The type of formulae is~\tydx{o}, which
-belongs to class~\cldx{logic}.  Figure~\ref{fol-syntax} gives the syntax.
-Note that $a$\verb|~=|$b$ is translated to $\neg(a=b)$.
-
-Figure~\ref{fol-rules} shows the inference rules with their~\ML\ names.
-Negation is defined in the usual way for intuitionistic logic; $\neg P$
-abbreviates $P\imp\bot$.  The biconditional~($\bimp$) is defined through
-$\conj$ and~$\imp$; introduction and elimination rules are derived for it.
-
-The unique existence quantifier, $\exists!x.P(x)$, is defined in terms
-of~$\exists$ and~$\forall$.  An Isabelle binder, it admits nested
-quantifications.  For instance, $\exists!x\;y.P(x,y)$ abbreviates
-$\exists!x. \exists!y.P(x,y)$; note that this does not mean that there
-exists a unique pair $(x,y)$ satisfying~$P(x,y)$.
-
-Some intuitionistic derived rules are shown in
-Fig.\ts\ref{fol-int-derived}, again with their \ML\ names.  These include
-rules for the defined symbols $\neg$, $\bimp$ and $\exists!$.  Natural
-deduction typically involves a combination of forward and backward
-reasoning, particularly with the destruction rules $(\conj E)$,
-$({\imp}E)$, and~$(\forall E)$.  Isabelle's backward style handles these
-rules badly, so sequent-style rules are derived to eliminate conjunctions,
-implications, and universal quantifiers.  Used with elim-resolution,
-\tdx{allE} eliminates a universal quantifier while \tdx{all_dupE}
-re-inserts the quantified formula for later use.  The rules {\tt
-conj_impE}, etc., support the intuitionistic proof procedure
-(see~\S\ref{fol-int-prover}).
-
-See the files \texttt{FOL/IFOL.thy}, \texttt{FOL/IFOL.ML} and
-\texttt{FOL/intprover.ML} for complete listings of the rules and
-derived rules.
-
-\begin{figure} 
-\begin{center}
-\begin{tabular}{rrr} 
-  \it name      &\it meta-type  & \it description \\ 
-  \cdx{Trueprop}& $o\To prop$           & coercion to $prop$\\
-  \cdx{Not}     & $o\To o$              & negation ($\neg$) \\
-  \cdx{True}    & $o$                   & tautology ($\top$) \\
-  \cdx{False}   & $o$                   & absurdity ($\bot$)
-\end{tabular}
-\end{center}
-\subcaption{Constants}
-
-\begin{center}
-\begin{tabular}{llrrr} 
-  \it symbol &\it name     &\it meta-type & \it priority & \it description \\
-  \sdx{ALL}  & \cdx{All}  & $(\alpha\To o)\To o$ & 10 & 
-        universal quantifier ($\forall$) \\
-  \sdx{EX}   & \cdx{Ex}   & $(\alpha\To o)\To o$ & 10 & 
-        existential quantifier ($\exists$) \\
-  \texttt{EX!}  & \cdx{Ex1}  & $(\alpha\To o)\To o$ & 10 & 
-        unique existence ($\exists!$)
-\end{tabular}
-\index{*"E"X"! symbol}
-\end{center}
-\subcaption{Binders} 
-
-\begin{center}
-\index{*"= symbol}
-\index{&@{\tt\&} symbol}
-\index{*"| symbol}
-\index{*"-"-"> symbol}
-\index{*"<"-"> symbol}
-\begin{tabular}{rrrr} 
-  \it symbol    & \it meta-type         & \it priority & \it description \\ 
-  \tt =         & $[\alpha,\alpha]\To o$ & Left 50 & equality ($=$) \\
-  \tt \&        & $[o,o]\To o$          & Right 35 & conjunction ($\conj$) \\
-  \tt |         & $[o,o]\To o$          & Right 30 & disjunction ($\disj$) \\
-  \tt -->       & $[o,o]\To o$          & Right 25 & implication ($\imp$) \\
-  \tt <->       & $[o,o]\To o$          & Right 25 & biconditional ($\bimp$) 
-\end{tabular}
-\end{center}
-\subcaption{Infixes}
-
-\dquotes
-\[\begin{array}{rcl}
- formula & = & \hbox{expression of type~$o$} \\
-         & | & term " = " term \\
-         & | & term " \ttilde= " term \\
-         & | & "\ttilde\ " formula \\
-         & | & formula " \& " formula \\
-         & | & formula " | " formula \\
-         & | & formula " --> " formula \\
-         & | & formula " <-> " formula \\
-         & | & "ALL~" id~id^* " . " formula \\
-         & | & "EX~~" id~id^* " . " formula \\
-         & | & "EX!~" id~id^* " . " formula
-  \end{array}
-\]
-\subcaption{Grammar}
-\caption{Syntax of \texttt{FOL}} \label{fol-syntax}
-\end{figure}
-
-
-\begin{figure} 
-\begin{ttbox}
-\tdx{refl}        a=a
-\tdx{subst}       [| a=b;  P(a) |] ==> P(b)
-\subcaption{Equality rules}
-
-\tdx{conjI}       [| P;  Q |] ==> P&Q
-\tdx{conjunct1}   P&Q ==> P
-\tdx{conjunct2}   P&Q ==> Q
-
-\tdx{disjI1}      P ==> P|Q
-\tdx{disjI2}      Q ==> P|Q
-\tdx{disjE}       [| P|Q;  P ==> R;  Q ==> R |] ==> R
-
-\tdx{impI}        (P ==> Q) ==> P-->Q
-\tdx{mp}          [| P-->Q;  P |] ==> Q
-
-\tdx{FalseE}      False ==> P
-\subcaption{Propositional rules}
-
-\tdx{allI}        (!!x. P(x))  ==> (ALL x.P(x))
-\tdx{spec}        (ALL x.P(x)) ==> P(x)
-
-\tdx{exI}         P(x) ==> (EX x.P(x))
-\tdx{exE}         [| EX x.P(x);  !!x. P(x) ==> R |] ==> R
-\subcaption{Quantifier rules}
-
-\tdx{True_def}    True        == False-->False
-\tdx{not_def}     ~P          == P-->False
-\tdx{iff_def}     P<->Q       == (P-->Q) & (Q-->P)
-\tdx{ex1_def}     EX! x. P(x) == EX x. P(x) & (ALL y. P(y) --> y=x)
-\subcaption{Definitions}
-\end{ttbox}
-
-\caption{Rules of intuitionistic logic} \label{fol-rules}
-\end{figure}
-
-
-\begin{figure} 
-\begin{ttbox}
-\tdx{sym}       a=b ==> b=a
-\tdx{trans}     [| a=b;  b=c |] ==> a=c
-\tdx{ssubst}    [| b=a;  P(a) |] ==> P(b)
-\subcaption{Derived equality rules}
-
-\tdx{TrueI}     True
-
-\tdx{notI}      (P ==> False) ==> ~P
-\tdx{notE}      [| ~P;  P |] ==> R
-
-\tdx{iffI}      [| P ==> Q;  Q ==> P |] ==> P<->Q
-\tdx{iffE}      [| P <-> Q;  [| P-->Q; Q-->P |] ==> R |] ==> R
-\tdx{iffD1}     [| P <-> Q;  P |] ==> Q            
-\tdx{iffD2}     [| P <-> Q;  Q |] ==> P
-
-\tdx{ex1I}      [| P(a);  !!x. P(x) ==> x=a |]  ==>  EX! x. P(x)
-\tdx{ex1E}      [| EX! x.P(x);  !!x.[| P(x);  ALL y. P(y) --> y=x |] ==> R 
-          |] ==> R
-\subcaption{Derived rules for \(\top\), \(\neg\), \(\bimp\) and \(\exists!\)}
-
-\tdx{conjE}     [| P&Q;  [| P; Q |] ==> R |] ==> R
-\tdx{impE}      [| P-->Q;  P;  Q ==> R |] ==> R
-\tdx{allE}      [| ALL x.P(x);  P(x) ==> R |] ==> R
-\tdx{all_dupE}  [| ALL x.P(x);  [| P(x); ALL x.P(x) |] ==> R |] ==> R
-\subcaption{Sequent-style elimination rules}
-
-\tdx{conj_impE} [| (P&Q)-->S;  P-->(Q-->S) ==> R |] ==> R
-\tdx{disj_impE} [| (P|Q)-->S;  [| P-->S; Q-->S |] ==> R |] ==> R
-\tdx{imp_impE}  [| (P-->Q)-->S;  [| P; Q-->S |] ==> Q;  S ==> R |] ==> R
-\tdx{not_impE}  [| ~P --> S;  P ==> False;  S ==> R |] ==> R
-\tdx{iff_impE}  [| (P<->Q)-->S; [| P; Q-->S |] ==> Q; [| Q; P-->S |] ==> P;
-             S ==> R |] ==> R
-\tdx{all_impE}  [| (ALL x.P(x))-->S;  !!x.P(x);  S ==> R |] ==> R
-\tdx{ex_impE}   [| (EX x.P(x))-->S;  P(a)-->S ==> R |] ==> R
-\end{ttbox}
-\subcaption{Intuitionistic simplification of implication}
-\caption{Derived rules for intuitionistic logic} \label{fol-int-derived}
-\end{figure}
-
-
-\section{Generic packages}
-\FOL{} instantiates most of Isabelle's generic packages.
-\begin{itemize}
-\item 
-It instantiates the simplifier.  Both equality ($=$) and the biconditional
-($\bimp$) may be used for rewriting.  Tactics such as \texttt{Asm_simp_tac} and
-\texttt{Full_simp_tac} refer to the default simpset (\texttt{simpset()}), which works for
-most purposes.  Named simplification sets include \ttindexbold{IFOL_ss},
-for intuitionistic first-order logic, and \ttindexbold{FOL_ss},
-for classical logic.  See the file
-\texttt{FOL/simpdata.ML} for a complete listing of the simplification
-rules%
-\iflabelundefined{sec:setting-up-simp}{}%
-        {, and \S\ref{sec:setting-up-simp} for discussion}.
-
-\item 
-It instantiates the classical reasoner.  See~\S\ref{fol-cla-prover}
-for details. 
-
-\item \FOL{} provides the tactic \ttindex{hyp_subst_tac}, which substitutes
-  for an equality throughout a subgoal and its hypotheses.  This tactic uses
-  \FOL's general substitution rule.
-\end{itemize}
-
-\begin{warn}\index{simplification!of conjunctions}%
-  Reducing $a=b\conj P(a)$ to $a=b\conj P(b)$ is sometimes advantageous.  The
-  left part of a conjunction helps in simplifying the right part.  This effect
-  is not available by default: it can be slow.  It can be obtained by
-  including \ttindex{conj_cong} in a simpset, \verb$addcongs [conj_cong]$.
-\end{warn}
-
-
-\section{Intuitionistic proof procedures} \label{fol-int-prover}
-Implication elimination (the rules~\texttt{mp} and~\texttt{impE}) pose
-difficulties for automated proof.  In intuitionistic logic, the assumption
-$P\imp Q$ cannot be treated like $\neg P\disj Q$.  Given $P\imp Q$, we may
-use~$Q$ provided we can prove~$P$; the proof of~$P$ may require repeated
-use of $P\imp Q$.  If the proof of~$P$ fails then the whole branch of the
-proof must be abandoned.  Thus intuitionistic propositional logic requires
-backtracking.  
-
-For an elementary example, consider the intuitionistic proof of $Q$ from
-$P\imp Q$ and $(P\imp Q)\imp P$.  The implication $P\imp Q$ is needed
-twice:
-\[ \infer[({\imp}E)]{Q}{P\imp Q &
-       \infer[({\imp}E)]{P}{(P\imp Q)\imp P & P\imp Q}} 
-\]
-The theorem prover for intuitionistic logic does not use~\texttt{impE}.\@
-Instead, it simplifies implications using derived rules
-(Fig.\ts\ref{fol-int-derived}).  It reduces the antecedents of implications
-to atoms and then uses Modus Ponens: from $P\imp Q$ and~$P$ deduce~$Q$.
-The rules \tdx{conj_impE} and \tdx{disj_impE} are 
-straightforward: $(P\conj Q)\imp S$ is equivalent to $P\imp (Q\imp S)$, and
-$(P\disj Q)\imp S$ is equivalent to the conjunction of $P\imp S$ and $Q\imp
-S$.  The other \ldots{\tt_impE} rules are unsafe; the method requires
-backtracking.  All the rules are derived in the same simple manner.
-
-Dyckhoff has independently discovered similar rules, and (more importantly)
-has demonstrated their completeness for propositional
-logic~\cite{dyckhoff}.  However, the tactics given below are not complete
-for first-order logic because they discard universally quantified
-assumptions after a single use.
-\begin{ttbox} 
-mp_tac              : int -> tactic
-eq_mp_tac           : int -> tactic
-IntPr.safe_step_tac : int -> tactic
-IntPr.safe_tac      :        tactic
-IntPr.inst_step_tac : int -> tactic
-IntPr.step_tac      : int -> tactic
-IntPr.fast_tac      : int -> tactic
-IntPr.best_tac      : int -> tactic
-\end{ttbox}
-Most of these belong to the structure \texttt{IntPr} and resemble the
-tactics of Isabelle's classical reasoner.
-
-\begin{ttdescription}
-\item[\ttindexbold{mp_tac} {\it i}] 
-attempts to use \tdx{notE} or \tdx{impE} within the assumptions in
-subgoal $i$.  For each assumption of the form $\neg P$ or $P\imp Q$, it
-searches for another assumption unifiable with~$P$.  By
-contradiction with $\neg P$ it can solve the subgoal completely; by Modus
-Ponens it can replace the assumption $P\imp Q$ by $Q$.  The tactic can
-produce multiple outcomes, enumerating all suitable pairs of assumptions.
-
-\item[\ttindexbold{eq_mp_tac} {\it i}] 
-is like \texttt{mp_tac} {\it i}, but may not instantiate unknowns --- thus, it
-is safe.
-
-\item[\ttindexbold{IntPr.safe_step_tac} $i$] performs a safe step on
-subgoal~$i$.  This may include proof by assumption or Modus Ponens (taking
-care not to instantiate unknowns), or \texttt{hyp_subst_tac}. 
-
-\item[\ttindexbold{IntPr.safe_tac}] repeatedly performs safe steps on all 
-subgoals.  It is deterministic, with at most one outcome.
-
-\item[\ttindexbold{IntPr.inst_step_tac} $i$] is like \texttt{safe_step_tac},
-but allows unknowns to be instantiated.
-
-\item[\ttindexbold{IntPr.step_tac} $i$] tries \texttt{safe_tac} or {\tt
-    inst_step_tac}, or applies an unsafe rule.  This is the basic step of
-  the intuitionistic proof procedure.
-
-\item[\ttindexbold{IntPr.fast_tac} $i$] applies \texttt{step_tac}, using
-depth-first search, to solve subgoal~$i$.
-
-\item[\ttindexbold{IntPr.best_tac} $i$] applies \texttt{step_tac}, using
-best-first search (guided by the size of the proof state) to solve subgoal~$i$.
-\end{ttdescription}
-Here are some of the theorems that \texttt{IntPr.fast_tac} proves
-automatically.  The latter three date from {\it Principia Mathematica}
-(*11.53, *11.55, *11.61)~\cite{principia}.
-\begin{ttbox}
-~~P & ~~(P --> Q) --> ~~Q
-(ALL x y. P(x) --> Q(y)) <-> ((EX x. P(x)) --> (ALL y. Q(y)))
-(EX x y. P(x) & Q(x,y)) <-> (EX x. P(x) & (EX y. Q(x,y)))
-(EX y. ALL x. P(x) --> Q(x,y)) --> (ALL x. P(x) --> (EX y. Q(x,y)))
-\end{ttbox}
-
-
-
-\begin{figure} 
-\begin{ttbox}
-\tdx{excluded_middle}    ~P | P
-
-\tdx{disjCI}    (~Q ==> P) ==> P|Q
-\tdx{exCI}      (ALL x. ~P(x) ==> P(a)) ==> EX x.P(x)
-\tdx{impCE}     [| P-->Q; ~P ==> R; Q ==> R |] ==> R
-\tdx{iffCE}     [| P<->Q;  [| P; Q |] ==> R;  [| ~P; ~Q |] ==> R |] ==> R
-\tdx{notnotD}   ~~P ==> P
-\tdx{swap}      ~P ==> (~Q ==> P) ==> Q
-\end{ttbox}
-\caption{Derived rules for classical logic} \label{fol-cla-derived}
-\end{figure}
-
-
-\section{Classical proof procedures} \label{fol-cla-prover}
-The classical theory, \thydx{FOL}, consists of intuitionistic logic plus
-the rule
-$$ \vcenter{\infer{P}{\infer*{P}{[\neg P]}}} \eqno(classical) $$
-\noindent
-Natural deduction in classical logic is not really all that natural.
-{\FOL} derives classical introduction rules for $\disj$ and~$\exists$, as
-well as classical elimination rules for~$\imp$ and~$\bimp$, and the swap
-rule (see Fig.\ts\ref{fol-cla-derived}).
-
-The classical reasoner is installed.  Tactics such as \texttt{Blast_tac} and {\tt
-Best_tac} refer to the default claset (\texttt{claset()}), which works for most
-purposes.  Named clasets include \ttindexbold{prop_cs}, which includes the
-propositional rules, and \ttindexbold{FOL_cs}, which also includes quantifier
-rules.  See the file \texttt{FOL/cladata.ML} for lists of the
-classical rules, and 
-\iflabelundefined{chap:classical}{the {\em Reference Manual\/}}%
-        {Chap.\ts\ref{chap:classical}} 
-for more discussion of classical proof methods.
-
-
-\section{An intuitionistic example}
-Here is a session similar to one in {\em Logic and Computation}
-\cite[pages~222--3]{paulson87}.  Isabelle treats quantifiers differently
-from {\sc lcf}-based theorem provers such as {\sc hol}.  
-
-First, we specify that we are working in intuitionistic logic:
-\begin{ttbox}
-context IFOL.thy;
-\end{ttbox}
-The proof begins by entering the goal, then applying the rule $({\imp}I)$.
-\begin{ttbox}
-Goal "(EX y. ALL x. Q(x,y)) -->  (ALL x. EX y. Q(x,y))";
-{\out Level 0}
-{\out (EX y. ALL x. Q(x,y)) --> (ALL x. EX y. Q(x,y))}
-{\out  1. (EX y. ALL x. Q(x,y)) --> (ALL x. EX y. Q(x,y))}
-\ttbreak
-by (resolve_tac [impI] 1);
-{\out Level 1}
-{\out (EX y. ALL x. Q(x,y)) --> (ALL x. EX y. Q(x,y))}
-{\out  1. EX y. ALL x. Q(x,y) ==> ALL x. EX y. Q(x,y)}
-\end{ttbox}
-In this example, we shall never have more than one subgoal.  Applying
-$({\imp}I)$ replaces~\verb|-->| by~\verb|==>|, making
-\(\ex{y}\all{x}Q(x,y)\) an assumption.  We have the choice of
-$({\exists}E)$ and $({\forall}I)$; let us try the latter.
-\begin{ttbox}
-by (resolve_tac [allI] 1);
-{\out Level 2}
-{\out (EX y. ALL x. Q(x,y)) --> (ALL x. EX y. Q(x,y))}
-{\out  1. !!x. EX y. ALL x. Q(x,y) ==> EX y. Q(x,y)}
-\end{ttbox}
-Applying $({\forall}I)$ replaces the \texttt{ALL~x} by \hbox{\tt!!x},
-changing the universal quantifier from object~($\forall$) to
-meta~($\Forall$).  The bound variable is a {\bf parameter} of the
-subgoal.  We now must choose between $({\exists}I)$ and $({\exists}E)$.  What
-happens if the wrong rule is chosen?
-\begin{ttbox}
-by (resolve_tac [exI] 1);
-{\out Level 3}
-{\out (EX y. ALL x. Q(x,y)) --> (ALL x. EX y. Q(x,y))}
-{\out  1. !!x. EX y. ALL x. Q(x,y) ==> Q(x,?y2(x))}
-\end{ttbox}
-The new subgoal~1 contains the function variable {\tt?y2}.  Instantiating
-{\tt?y2} can replace~{\tt?y2(x)} by a term containing~\texttt{x}, even
-though~\texttt{x} is a bound variable.  Now we analyse the assumption
-\(\exists y.\forall x. Q(x,y)\) using elimination rules:
-\begin{ttbox}
-by (eresolve_tac [exE] 1);
-{\out Level 4}
-{\out (EX y. ALL x. Q(x,y)) --> (ALL x. EX y. Q(x,y))}
-{\out  1. !!x y. ALL x. Q(x,y) ==> Q(x,?y2(x))}
-\end{ttbox}
-Applying $(\exists E)$ has produced the parameter \texttt{y} and stripped the
-existential quantifier from the assumption.  But the subgoal is unprovable:
-there is no way to unify \texttt{?y2(x)} with the bound variable~\texttt{y}.
-Using \texttt{choplev} we can return to the critical point.  This time we
-apply $({\exists}E)$:
-\begin{ttbox}
-choplev 2;
-{\out Level 2}
-{\out (EX y. ALL x. Q(x,y)) --> (ALL x. EX y. Q(x,y))}
-{\out  1. !!x. EX y. ALL x. Q(x,y) ==> EX y. Q(x,y)}
-\ttbreak
-by (eresolve_tac [exE] 1);
-{\out Level 3}
-{\out (EX y. ALL x. Q(x,y)) --> (ALL x. EX y. Q(x,y))}
-{\out  1. !!x y. ALL x. Q(x,y) ==> EX y. Q(x,y)}
-\end{ttbox}
-We now have two parameters and no scheme variables.  Applying
-$({\exists}I)$ and $({\forall}E)$ produces two scheme variables, which are
-applied to those parameters.  Parameters should be produced early, as this
-example demonstrates.
-\begin{ttbox}
-by (resolve_tac [exI] 1);
-{\out Level 4}
-{\out (EX y. ALL x. Q(x,y)) --> (ALL x. EX y. Q(x,y))}
-{\out  1. !!x y. ALL x. Q(x,y) ==> Q(x,?y3(x,y))}
-\ttbreak
-by (eresolve_tac [allE] 1);
-{\out Level 5}
-{\out (EX y. ALL x. Q(x,y)) --> (ALL x. EX y. Q(x,y))}
-{\out  1. !!x y. Q(?x4(x,y),y) ==> Q(x,?y3(x,y))}
-\end{ttbox}
-The subgoal has variables \texttt{?y3} and \texttt{?x4} applied to both
-parameters.  The obvious projection functions unify {\tt?x4(x,y)} with~{\tt
-x} and \verb|?y3(x,y)| with~\texttt{y}.
-\begin{ttbox}
-by (assume_tac 1);
-{\out Level 6}
-{\out (EX y. ALL x. Q(x,y)) --> (ALL x. EX y. Q(x,y))}
-{\out No subgoals!}
-\end{ttbox}
-The theorem was proved in six tactic steps, not counting the abandoned
-ones.  But proof checking is tedious; \ttindex{IntPr.fast_tac} proves the
-theorem in one step.
-\begin{ttbox}
-Goal "(EX y. ALL x. Q(x,y)) -->  (ALL x. EX y. Q(x,y))";
-{\out Level 0}
-{\out (EX y. ALL x. Q(x,y)) --> (ALL x. EX y. Q(x,y))}
-{\out  1. (EX y. ALL x. Q(x,y)) --> (ALL x. EX y. Q(x,y))}
-by (IntPr.fast_tac 1);
-{\out Level 1}
-{\out (EX y. ALL x. Q(x,y)) --> (ALL x. EX y. Q(x,y))}
-{\out No subgoals!}
-\end{ttbox}
-
-
-\section{An example of intuitionistic negation}
-The following example demonstrates the specialized forms of implication
-elimination.  Even propositional formulae can be difficult to prove from
-the basic rules; the specialized rules help considerably.  
-
-Propositional examples are easy to invent.  As Dummett notes~\cite[page
-28]{dummett}, $\neg P$ is classically provable if and only if it is
-intuitionistically provable;  therefore, $P$ is classically provable if and
-only if $\neg\neg P$ is intuitionistically provable.%
-\footnote{Of course this holds only for propositional logic, not if $P$ is
-  allowed to contain quantifiers.} Proving $\neg\neg P$ intuitionistically is
-much harder than proving~$P$ classically.
-
-Our example is the double negation of the classical tautology $(P\imp
-Q)\disj (Q\imp P)$.  When stating the goal, we command Isabelle to expand
-negations to implications using the definition $\neg P\equiv P\imp\bot$.
-This allows use of the special implication rules.
-\begin{ttbox}
-Goalw [not_def] "~ ~ ((P-->Q) | (Q-->P))";
-{\out Level 0}
-{\out ~ ~ ((P --> Q) | (Q --> P))}
-{\out  1. ((P --> Q) | (Q --> P) --> False) --> False}
-\end{ttbox}
-The first step is trivial.
-\begin{ttbox}
-by (resolve_tac [impI] 1);
-{\out Level 1}
-{\out ~ ~ ((P --> Q) | (Q --> P))}
-{\out  1. (P --> Q) | (Q --> P) --> False ==> False}
-\end{ttbox}
-By $(\imp E)$ it would suffice to prove $(P\imp Q)\disj (Q\imp P)$, but
-that formula is not a theorem of intuitionistic logic.  Instead we apply
-the specialized implication rule \tdx{disj_impE}.  It splits the
-assumption into two assumptions, one for each disjunct.
-\begin{ttbox}
-by (eresolve_tac [disj_impE] 1);
-{\out Level 2}
-{\out ~ ~ ((P --> Q) | (Q --> P))}
-{\out  1. [| (P --> Q) --> False; (Q --> P) --> False |] ==> False}
-\end{ttbox}
-We cannot hope to prove $P\imp Q$ or $Q\imp P$ separately, but
-their negations are inconsistent.  Applying \tdx{imp_impE} breaks down
-the assumption $\neg(P\imp Q)$, asking to show~$Q$ while providing new
-assumptions~$P$ and~$\neg Q$.
-\begin{ttbox}
-by (eresolve_tac [imp_impE] 1);
-{\out Level 3}
-{\out ~ ~ ((P --> Q) | (Q --> P))}
-{\out  1. [| (Q --> P) --> False; P; Q --> False |] ==> Q}
-{\out  2. [| (Q --> P) --> False; False |] ==> False}
-\end{ttbox}
-Subgoal~2 holds trivially; let us ignore it and continue working on
-subgoal~1.  Thanks to the assumption~$P$, we could prove $Q\imp P$;
-applying \tdx{imp_impE} is simpler.
-\begin{ttbox}
-by (eresolve_tac [imp_impE] 1);
-{\out Level 4}
-{\out ~ ~ ((P --> Q) | (Q --> P))}
-{\out  1. [| P; Q --> False; Q; P --> False |] ==> P}
-{\out  2. [| P; Q --> False; False |] ==> Q}
-{\out  3. [| (Q --> P) --> False; False |] ==> False}
-\end{ttbox}
-The three subgoals are all trivial.
-\begin{ttbox}
-by (REPEAT (eresolve_tac [FalseE] 2));
-{\out Level 5}
-{\out ~ ~ ((P --> Q) | (Q --> P))}
-{\out  1. [| P; Q --> False; Q; P --> False |] ==> P}
-\ttbreak
-by (assume_tac 1);
-{\out Level 6}
-{\out ~ ~ ((P --> Q) | (Q --> P))}
-{\out No subgoals!}
-\end{ttbox}
-This proof is also trivial for \texttt{IntPr.fast_tac}.
-
-
-\section{A classical example} \label{fol-cla-example}
-To illustrate classical logic, we shall prove the theorem
-$\ex{y}\all{x}P(y)\imp P(x)$.  Informally, the theorem can be proved as
-follows.  Choose~$y$ such that~$\neg P(y)$, if such exists; otherwise
-$\all{x}P(x)$ is true.  Either way the theorem holds.  First, we switch to
-classical logic:
-\begin{ttbox}
-context FOL.thy;
-\end{ttbox}
-
-The formal proof does not conform in any obvious way to the sketch given
-above.  The key inference is the first one, \tdx{exCI}; this classical
-version of~$(\exists I)$ allows multiple instantiation of the quantifier.
-\begin{ttbox}
-Goal "EX y. ALL x. P(y)-->P(x)";
-{\out Level 0}
-{\out EX y. ALL x. P(y) --> P(x)}
-{\out  1. EX y. ALL x. P(y) --> P(x)}
-\ttbreak
-by (resolve_tac [exCI] 1);
-{\out Level 1}
-{\out EX y. ALL x. P(y) --> P(x)}
-{\out  1. ALL y. ~ (ALL x. P(y) --> P(x)) ==> ALL x. P(?a) --> P(x)}
-\end{ttbox}
-We can either exhibit a term {\tt?a} to satisfy the conclusion of
-subgoal~1, or produce a contradiction from the assumption.  The next
-steps are routine.
-\begin{ttbox}
-by (resolve_tac [allI] 1);
-{\out Level 2}
-{\out EX y. ALL x. P(y) --> P(x)}
-{\out  1. !!x. ALL y. ~ (ALL x. P(y) --> P(x)) ==> P(?a) --> P(x)}
-\ttbreak
-by (resolve_tac [impI] 1);
-{\out Level 3}
-{\out EX y. ALL x. P(y) --> P(x)}
-{\out  1. !!x. [| ALL y. ~ (ALL x. P(y) --> P(x)); P(?a) |] ==> P(x)}
-\end{ttbox}
-By the duality between $\exists$ and~$\forall$, applying~$(\forall E)$
-in effect applies~$(\exists I)$ again.
-\begin{ttbox}
-by (eresolve_tac [allE] 1);
-{\out Level 4}
-{\out EX y. ALL x. P(y) --> P(x)}
-{\out  1. !!x. [| P(?a); ~ (ALL xa. P(?y3(x)) --> P(xa)) |] ==> P(x)}
-\end{ttbox}
-In classical logic, a negated assumption is equivalent to a conclusion.  To
-get this effect, we create a swapped version of~$(\forall I)$ and apply it
-using \ttindex{eresolve_tac}; we could equivalently have applied~$(\forall
-I)$ using \ttindex{swap_res_tac}.
-\begin{ttbox}
-allI RSN (2,swap);
-{\out val it = "[| ~ (ALL x. ?P1(x)); !!x. ~ ?Q ==> ?P1(x) |] ==> ?Q" : thm}
-by (eresolve_tac [it] 1);
-{\out Level 5}
-{\out EX y. ALL x. P(y) --> P(x)}
-{\out  1. !!x xa. [| P(?a); ~ P(x) |] ==> P(?y3(x)) --> P(xa)}
-\end{ttbox}
-The previous conclusion, \texttt{P(x)}, has become a negated assumption.
-\begin{ttbox}
-by (resolve_tac [impI] 1);
-{\out Level 6}
-{\out EX y. ALL x. P(y) --> P(x)}
-{\out  1. !!x xa. [| P(?a); ~ P(x); P(?y3(x)) |] ==> P(xa)}
-\end{ttbox}
-The subgoal has three assumptions.  We produce a contradiction between the
-\index{assumptions!contradictory} assumptions~\verb|~P(x)| and~{\tt
-  P(?y3(x))}.  The proof never instantiates the unknown~{\tt?a}.
-\begin{ttbox}
-by (eresolve_tac [notE] 1);
-{\out Level 7}
-{\out EX y. ALL x. P(y) --> P(x)}
-{\out  1. !!x xa. [| P(?a); P(?y3(x)) |] ==> P(x)}
-\ttbreak
-by (assume_tac 1);
-{\out Level 8}
-{\out EX y. ALL x. P(y) --> P(x)}
-{\out No subgoals!}
-\end{ttbox}
-The civilised way to prove this theorem is through \ttindex{Blast_tac},
-which automatically uses the classical version of~$(\exists I)$:
-\begin{ttbox}
-Goal "EX y. ALL x. P(y)-->P(x)";
-{\out Level 0}
-{\out EX y. ALL x. P(y) --> P(x)}
-{\out  1. EX y. ALL x. P(y) --> P(x)}
-by (Blast_tac 1);
-{\out Depth = 0}
-{\out Depth = 1}
-{\out Depth = 2}
-{\out Level 1}
-{\out EX y. ALL x. P(y) --> P(x)}
-{\out No subgoals!}
-\end{ttbox}
-If this theorem seems counterintuitive, then perhaps you are an
-intuitionist.  In constructive logic, proving $\ex{y}\all{x}P(y)\imp P(x)$
-requires exhibiting a particular term~$t$ such that $\all{x}P(t)\imp P(x)$,
-which we cannot do without further knowledge about~$P$.
-
-
-\section{Derived rules and the classical tactics}
-Classical first-order logic can be extended with the propositional
-connective $if(P,Q,R)$, where 
-$$ if(P,Q,R) \equiv P\conj Q \disj \neg P \conj R. \eqno(if) $$
-Theorems about $if$ can be proved by treating this as an abbreviation,
-replacing $if(P,Q,R)$ by $P\conj Q \disj \neg P \conj R$ in subgoals.  But
-this duplicates~$P$, causing an exponential blowup and an unreadable
-formula.  Introducing further abbreviations makes the problem worse.
-
-Natural deduction demands rules that introduce and eliminate $if(P,Q,R)$
-directly, without reference to its definition.  The simple identity
-\[ if(P,Q,R) \,\bimp\, (P\imp Q)\conj (\neg P\imp R) \]
-suggests that the
-$if$-introduction rule should be
-\[ \infer[({if}\,I)]{if(P,Q,R)}{\infer*{Q}{[P]}  &  \infer*{R}{[\neg P]}} \]
-The $if$-elimination rule reflects the definition of $if(P,Q,R)$ and the
-elimination rules for~$\disj$ and~$\conj$.
-\[ \infer[({if}\,E)]{S}{if(P,Q,R) & \infer*{S}{[P,Q]}
-                                  & \infer*{S}{[\neg P,R]}} 
-\]
-Having made these plans, we get down to work with Isabelle.  The theory of
-classical logic, \texttt{FOL}, is extended with the constant
-$if::[o,o,o]\To o$.  The axiom \tdx{if_def} asserts the
-equation~$(if)$.
-\begin{ttbox}
-If = FOL +
-consts  if     :: [o,o,o]=>o
-rules   if_def "if(P,Q,R) == P&Q | ~P&R"
-end
-\end{ttbox}
-We create the file \texttt{If.thy} containing these declarations.  (This file
-is on directory \texttt{FOL/ex} in the Isabelle distribution.)  Typing
-\begin{ttbox}
-use_thy "If";  
-\end{ttbox}
-loads that theory and sets it to be the current context.
-
-
-\subsection{Deriving the introduction rule}
-
-The derivations of the introduction and elimination rules demonstrate the
-methods for rewriting with definitions.  Classical reasoning is required,
-so we use \texttt{blast_tac}.
-
-The introduction rule, given the premises $P\Imp Q$ and $\neg P\Imp R$,
-concludes $if(P,Q,R)$.  We propose the conclusion as the main goal
-using~\ttindex{Goalw}, which uses \texttt{if_def} to rewrite occurrences
-of $if$ in the subgoal.
-\begin{ttbox}
-val prems = Goalw [if_def]
-    "[| P ==> Q; ~ P ==> R |] ==> if(P,Q,R)";
-{\out Level 0}
-{\out if(P,Q,R)}
-{\out  1. P & Q | ~ P & R}
-\end{ttbox}
-The premises (bound to the {\ML} variable \texttt{prems}) are passed as
-introduction rules to \ttindex{blast_tac}.  Remember that \texttt{claset()} refers
-to the default classical set.
-\begin{ttbox}
-by (blast_tac (claset() addIs prems) 1);
-{\out Level 1}
-{\out if(P,Q,R)}
-{\out No subgoals!}
-qed "ifI";
-\end{ttbox}
-
-
-\subsection{Deriving the elimination rule}
-The elimination rule has three premises, two of which are themselves rules.
-The conclusion is simply $S$.
-\begin{ttbox}
-val major::prems = Goalw [if_def]
-   "[| if(P,Q,R);  [| P; Q |] ==> S; [| ~ P; R |] ==> S |] ==> S";
-{\out Level 0}
-{\out S}
-{\out  1. S}
-\end{ttbox}
-The major premise contains an occurrence of~$if$, but the version returned
-by \ttindex{Goalw} (and bound to the {\ML} variable~\texttt{major}) has the
-definition expanded.  Now \ttindex{cut_facts_tac} inserts~\texttt{major} as an
-assumption in the subgoal, so that \ttindex{blast_tac} can break it down.
-\begin{ttbox}
-by (cut_facts_tac [major] 1);
-{\out Level 1}
-{\out S}
-{\out  1. P & Q | ~ P & R ==> S}
-\ttbreak
-by (blast_tac (claset() addIs prems) 1);
-{\out Level 2}
-{\out S}
-{\out No subgoals!}
-qed "ifE";
-\end{ttbox}
-As you may recall from
-\iflabelundefined{definitions}{{\em Introduction to Isabelle}}%
-        {\S\ref{definitions}}, there are other
-ways of treating definitions when deriving a rule.  We can start the
-proof using \texttt{Goal}, which does not expand definitions, instead of
-\texttt{Goalw}.  We can use \ttindex{rew_tac}
-to expand definitions in the subgoals---perhaps after calling
-\ttindex{cut_facts_tac} to insert the rule's premises.  We can use
-\ttindex{rewrite_rule}, which is a meta-inference rule, to expand
-definitions in the premises directly.
-
-
-\subsection{Using the derived rules}
-The rules just derived have been saved with the {\ML} names \tdx{ifI}
-and~\tdx{ifE}.  They permit natural proofs of theorems such as the
-following:
-\begin{eqnarray*}
-    if(P, if(Q,A,B), if(Q,C,D)) & \bimp & if(Q,if(P,A,C),if(P,B,D)) \\
-    if(if(P,Q,R), A, B)         & \bimp & if(P,if(Q,A,B),if(R,A,B))
-\end{eqnarray*}
-Proofs also require the classical reasoning rules and the $\bimp$
-introduction rule (called~\tdx{iffI}: do not confuse with~\texttt{ifI}). 
-
-To display the $if$-rules in action, let us analyse a proof step by step.
-\begin{ttbox}
-Goal "if(P, if(Q,A,B), if(Q,C,D)) <-> if(Q, if(P,A,C), if(P,B,D))";
-{\out Level 0}
-{\out if(P,if(Q,A,B),if(Q,C,D)) <-> if(Q,if(P,A,C),if(P,B,D))}
-{\out  1. if(P,if(Q,A,B),if(Q,C,D)) <-> if(Q,if(P,A,C),if(P,B,D))}
-\ttbreak
-by (resolve_tac [iffI] 1);
-{\out Level 1}
-{\out if(P,if(Q,A,B),if(Q,C,D)) <-> if(Q,if(P,A,C),if(P,B,D))}
-{\out  1. if(P,if(Q,A,B),if(Q,C,D)) ==> if(Q,if(P,A,C),if(P,B,D))}
-{\out  2. if(Q,if(P,A,C),if(P,B,D)) ==> if(P,if(Q,A,B),if(Q,C,D))}
-\end{ttbox}
-The $if$-elimination rule can be applied twice in succession.
-\begin{ttbox}
-by (eresolve_tac [ifE] 1);
-{\out Level 2}
-{\out if(P,if(Q,A,B),if(Q,C,D)) <-> if(Q,if(P,A,C),if(P,B,D))}
-{\out  1. [| P; if(Q,A,B) |] ==> if(Q,if(P,A,C),if(P,B,D))}
-{\out  2. [| ~ P; if(Q,C,D) |] ==> if(Q,if(P,A,C),if(P,B,D))}
-{\out  3. if(Q,if(P,A,C),if(P,B,D)) ==> if(P,if(Q,A,B),if(Q,C,D))}
-\ttbreak
-by (eresolve_tac [ifE] 1);
-{\out Level 3}
-{\out if(P,if(Q,A,B),if(Q,C,D)) <-> if(Q,if(P,A,C),if(P,B,D))}
-{\out  1. [| P; Q; A |] ==> if(Q,if(P,A,C),if(P,B,D))}
-{\out  2. [| P; ~ Q; B |] ==> if(Q,if(P,A,C),if(P,B,D))}
-{\out  3. [| ~ P; if(Q,C,D) |] ==> if(Q,if(P,A,C),if(P,B,D))}
-{\out  4. if(Q,if(P,A,C),if(P,B,D)) ==> if(P,if(Q,A,B),if(Q,C,D))}
-\end{ttbox}
-%
-In the first two subgoals, all assumptions have been reduced to atoms.  Now
-$if$-introduction can be applied.  Observe how the $if$-rules break down
-occurrences of $if$ when they become the outermost connective.
-\begin{ttbox}
-by (resolve_tac [ifI] 1);
-{\out Level 4}
-{\out if(P,if(Q,A,B),if(Q,C,D)) <-> if(Q,if(P,A,C),if(P,B,D))}
-{\out  1. [| P; Q; A; Q |] ==> if(P,A,C)}
-{\out  2. [| P; Q; A; ~ Q |] ==> if(P,B,D)}
-{\out  3. [| P; ~ Q; B |] ==> if(Q,if(P,A,C),if(P,B,D))}
-{\out  4. [| ~ P; if(Q,C,D) |] ==> if(Q,if(P,A,C),if(P,B,D))}
-{\out  5. if(Q,if(P,A,C),if(P,B,D)) ==> if(P,if(Q,A,B),if(Q,C,D))}
-\ttbreak
-by (resolve_tac [ifI] 1);
-{\out Level 5}
-{\out if(P,if(Q,A,B),if(Q,C,D)) <-> if(Q,if(P,A,C),if(P,B,D))}
-{\out  1. [| P; Q; A; Q; P |] ==> A}
-{\out  2. [| P; Q; A; Q; ~ P |] ==> C}
-{\out  3. [| P; Q; A; ~ Q |] ==> if(P,B,D)}
-{\out  4. [| P; ~ Q; B |] ==> if(Q,if(P,A,C),if(P,B,D))}
-{\out  5. [| ~ P; if(Q,C,D) |] ==> if(Q,if(P,A,C),if(P,B,D))}
-{\out  6. if(Q,if(P,A,C),if(P,B,D)) ==> if(P,if(Q,A,B),if(Q,C,D))}
-\end{ttbox}
-Where do we stand?  The first subgoal holds by assumption; the second and
-third, by contradiction.  This is getting tedious.  We could use the classical
-reasoner, but first let us extend the default claset with the derived rules
-for~$if$.
-\begin{ttbox}
-AddSIs [ifI];
-AddSEs [ifE];
-\end{ttbox}
-Now we can revert to the
-initial proof state and let \ttindex{blast_tac} solve it.  
-\begin{ttbox}
-choplev 0;
-{\out Level 0}
-{\out if(P,if(Q,A,B),if(Q,C,D)) <-> if(Q,if(P,A,C),if(P,B,D))}
-{\out  1. if(P,if(Q,A,B),if(Q,C,D)) <-> if(Q,if(P,A,C),if(P,B,D))}
-by (Blast_tac 1);
-{\out Level 1}
-{\out if(P,if(Q,A,B),if(Q,C,D)) <-> if(Q,if(P,A,C),if(P,B,D))}
-{\out No subgoals!}
-\end{ttbox}
-This tactic also solves the other example.
-\begin{ttbox}
-Goal "if(if(P,Q,R), A, B) <-> if(P, if(Q,A,B), if(R,A,B))";
-{\out Level 0}
-{\out if(if(P,Q,R),A,B) <-> if(P,if(Q,A,B),if(R,A,B))}
-{\out  1. if(if(P,Q,R),A,B) <-> if(P,if(Q,A,B),if(R,A,B))}
-\ttbreak
-by (Blast_tac 1);
-{\out Level 1}
-{\out if(if(P,Q,R),A,B) <-> if(P,if(Q,A,B),if(R,A,B))}
-{\out No subgoals!}
-\end{ttbox}
-
-
-\subsection{Derived rules versus definitions}
-Dispensing with the derived rules, we can treat $if$ as an
-abbreviation, and let \ttindex{blast_tac} prove the expanded formula.  Let
-us redo the previous proof:
-\begin{ttbox}
-choplev 0;
-{\out Level 0}
-{\out if(if(P,Q,R),A,B) <-> if(P,if(Q,A,B),if(R,A,B))}
-{\out  1. if(if(P,Q,R),A,B) <-> if(P,if(Q,A,B),if(R,A,B))}
-\end{ttbox}
-This time, simply unfold using the definition of $if$:
-\begin{ttbox}
-by (rewtac if_def);
-{\out Level 1}
-{\out if(if(P,Q,R),A,B) <-> if(P,if(Q,A,B),if(R,A,B))}
-{\out  1. (P & Q | ~ P & R) & A | ~ (P & Q | ~ P & R) & B <->}
-{\out     P & (Q & A | ~ Q & B) | ~ P & (R & A | ~ R & B)}
-\end{ttbox}
-We are left with a subgoal in pure first-order logic, which is why the 
-classical reasoner can prove it given \texttt{FOL_cs} alone.  (We could, of 
-course, have used \texttt{Blast_tac}.)
-\begin{ttbox}
-by (blast_tac FOL_cs 1);
-{\out Level 2}
-{\out if(if(P,Q,R),A,B) <-> if(P,if(Q,A,B),if(R,A,B))}
-{\out No subgoals!}
-\end{ttbox}
-Expanding definitions reduces the extended logic to the base logic.  This
-approach has its merits --- especially if the prover for the base logic is
-good --- but can be slow.  In these examples, proofs using the default
-claset (which includes the derived rules) run about six times faster 
-than proofs using \texttt{FOL_cs}.
-
-Expanding definitions also complicates error diagnosis.  Suppose we are having
-difficulties in proving some goal.  If by expanding definitions we have
-made it unreadable, then we have little hope of diagnosing the problem.
-
-Attempts at program verification often yield invalid assertions.
-Let us try to prove one:
-\begin{ttbox}
-Goal "if(if(P,Q,R), A, B) <-> if(P, if(Q,A,B), if(R,B,A))";
-{\out Level 0}
-{\out if(if(P,Q,R),A,B) <-> if(P,if(Q,A,B),if(R,B,A))}
-{\out  1. if(if(P,Q,R),A,B) <-> if(P,if(Q,A,B),if(R,B,A))}
-by (Blast_tac 1);
-{\out by: tactic failed}
-\end{ttbox}
-This failure message is uninformative, but we can get a closer look at the
-situation by applying \ttindex{Step_tac}.
-\begin{ttbox}
-by (REPEAT (Step_tac 1));
-{\out Level 1}
-{\out if(if(P,Q,R),A,B) <-> if(P,if(Q,A,B),if(R,B,A))}
-{\out  1. [| A; ~ P; R; ~ P; R |] ==> B}
-{\out  2. [| B; ~ P; ~ R; ~ P; ~ R |] ==> A}
-{\out  3. [| ~ P; R; B; ~ P; R |] ==> A}
-{\out  4. [| ~ P; ~ R; A; ~ B; ~ P |] ==> R}
-\end{ttbox}
-Subgoal~1 is unprovable and yields a countermodel: $P$ and~$B$ are false
-while~$R$ and~$A$ are true.  This truth assignment reduces the main goal to
-$true\bimp false$, which is of course invalid.
-
-We can repeat this analysis by expanding definitions, using just
-the rules of {\FOL}:
-\begin{ttbox}
-choplev 0;
-{\out Level 0}
-{\out if(if(P,Q,R),A,B) <-> if(P,if(Q,A,B),if(R,B,A))}
-{\out  1. if(if(P,Q,R),A,B) <-> if(P,if(Q,A,B),if(R,B,A))}
-\ttbreak
-by (rewtac if_def);
-{\out Level 1}
-{\out if(if(P,Q,R),A,B) <-> if(P,if(Q,A,B),if(R,B,A))}
-{\out  1. (P & Q | ~ P & R) & A | ~ (P & Q | ~ P & R) & B <->}
-{\out     P & (Q & A | ~ Q & B) | ~ P & (R & B | ~ R & A)}
-by (blast_tac FOL_cs 1);
-{\out by: tactic failed}
-\end{ttbox}
-Again we apply \ttindex{step_tac}:
-\begin{ttbox}
-by (REPEAT (step_tac FOL_cs 1));
-{\out Level 2}
-{\out if(if(P,Q,R),A,B) <-> if(P,if(Q,A,B),if(R,B,A))}
-{\out  1. [| A; ~ P; R; ~ P; R; ~ False |] ==> B}
-{\out  2. [| A; ~ P; R; R; ~ False; ~ B; ~ B |] ==> Q}
-{\out  3. [| B; ~ P; ~ R; ~ P; ~ A |] ==> R}
-{\out  4. [| B; ~ P; ~ R; ~ Q; ~ A |] ==> R}
-{\out  5. [| B; ~ R; ~ P; ~ A; ~ R; Q; ~ False |] ==> A}
-{\out  6. [| ~ P; R; B; ~ P; R; ~ False |] ==> A}
-{\out  7. [| ~ P; ~ R; A; ~ B; ~ R |] ==> P}
-{\out  8. [| ~ P; ~ R; A; ~ B; ~ R |] ==> Q}
-\end{ttbox}
-Subgoal~1 yields the same countermodel as before.  But each proof step has
-taken six times as long, and the final result contains twice as many subgoals.
-
-Expanding definitions causes a great increase in complexity.  This is why
-the classical prover has been designed to accept derived rules.
-
-\index{first-order logic|)}
--- a/doc-src/Logics/HOL.tex	Fri Jan 08 13:20:59 1999 +0100
+++ b/doc-src/Logics/HOL.tex	Fri Jan 08 14:02:04 1999 +0100
@@ -905,10 +905,10 @@
 
 \subsection{Simplification and substitution}
 
-The simplifier is available in \HOL.  Tactics such as {\tt
+Simplification tactics such as {\tt
   Asm_simp_tac} and \texttt{Full_simp_tac} use the default simpset
 ({\tt simpset()}), which works for most purposes.  A quite minimal
-simplification set for higher-order logic is~\ttindexbold{HOL_ss},
+simplification set for higher-order logic is~\ttindexbold{HOL_ss};
 even more frugal is \ttindexbold{HOL_basic_ss}.  Equality~($=$), which
 also expresses logical equivalence, may be used for rewriting.  See
 the file \texttt{HOL/simpdata.ML} for a complete listing of the basic
@@ -2010,6 +2010,7 @@
 Theory \texttt{Arith} declares a generic function \texttt{size} of type
 $\alpha\To nat$.  Each datatype defines a particular instance of \texttt{size}
 by overloading according to the following scheme:
+%%% FIXME: This formula is too big and is completely unreadable
 \[
 size(C^j@i~x@1~\dots~x@{m^j@i}) = \!
 \left\{
--- a/doc-src/Logics/LK.tex	Fri Jan 08 13:20:59 1999 +0100
+++ b/doc-src/Logics/LK.tex	Fri Jan 08 14:02:04 1999 +0100
@@ -333,12 +333,21 @@
 \end{ttdescription}
 
 
+\section{Packaging sequent rules}
 
-\section{Packaging sequent rules}
-Section~\ref{sec:safe} described the distinction between safe and unsafe
-rules.  An unsafe rule may reduce a provable goal to an unprovable set of
-subgoals, and should only be used as a last resort.  Typical examples are
-the weakened quantifier rules {\tt allL_thin} and {\tt exR_thin}.
+The sequent calculi come with simple proof procedures.  These are incomplete
+but are reasonably powerful for interactive use.  They expect rules to be
+classified as {\bf safe} or {\bf unsafe}.  A rule is safe if applying it to a
+provable goal always yields provable subgoals.  If a rule is safe then it can
+be applied automatically to a goal without destroying our chances of finding a
+proof.  For instance, all the standard rules of the classical sequent calculus
+{\sc lk} are safe.  An unsafe rule may render the goal unprovable; typical
+examples are the weakened quantifier rules {\tt allL_thin} and {\tt exR_thin}.
+
+Proof procedures use safe rules whenever possible, using an unsafe rule as a
+last resort.  Safe rules that generate the fewest subgoals are preferred.
+Safe rules are (by definition) deterministic, while the unsafe
+rules require a search strategy, such as backtracking.
 
 A {\bf pack} is a pair whose first component is a list of safe rules and
 whose second is a list of unsafe rules.  Packs can be extended in an
@@ -387,6 +396,7 @@
 
 
 \section{Proof procedures}
+
 The \LK{} proof procedure is similar to the classical reasoner
 described in 
 \iflabelundefined{chap:classical}{the {\em Reference Manual\/}}%
@@ -475,7 +485,7 @@
 The theorem $\turn\ex{y}\all{x}P(y)\imp P(x)$ is a standard example of the
 classical treatment of the existential quantifier.  Classical reasoning
 is easy using~{\LK}, as you can see by comparing this proof with the one
-given in~\S\ref{fol-cla-example}.  From a logical point of view, the
+given in the FOL manual~\cite{isabelle-ZF}.  From a logical point of view, the
 proofs are essentially the same; the key step here is to use \tdx{exR}
 rather than the weaker~\tdx{exR_thin}.
 \begin{ttbox}
--- a/doc-src/Logics/Makefile	Fri Jan 08 13:20:59 1999 +0100
+++ b/doc-src/Logics/Makefile	Fri Jan 08 14:02:04 1999 +0100
@@ -6,7 +6,7 @@
 #########################################################################
 
 
-FILES =  logics.tex intro.tex FOL.tex ZF.tex HOL.tex LK.tex CTT.tex\
+FILES =  logics.tex preface.tex syntax.tex HOL.tex LK.tex CTT.tex\
 	 ../rail.sty ../proof.sty ../iman.sty ../extra.sty
 
 logics.dvi.gz:   $(FILES) 
--- a/doc-src/Logics/ZF-eg.txt	Fri Jan 08 13:20:59 1999 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,230 +0,0 @@
-(**** ZF examples ****)
-
-Pretty.setmargin 72;  (*existing macros just allow this margin*)
-print_depth 0;
-
-(*** Powerset example ***)
-
-val [prem] = goal ZF.thy "A<=B  ==>  Pow(A) <= Pow(B)";
-by (resolve_tac [subsetI] 1);
-by (resolve_tac [PowI] 1);
-by (dresolve_tac [PowD] 1);
-by (eresolve_tac [subset_trans] 1);
-by (resolve_tac [prem] 1);
-val Pow_mono = result();
-
-goal ZF.thy "Pow(A Int B) = Pow(A) Int Pow(B)";
-by (resolve_tac [equalityI] 1);
-by (resolve_tac [Int_greatest] 1);
-by (resolve_tac [Int_lower1 RS Pow_mono] 1);
-by (resolve_tac [Int_lower2 RS Pow_mono] 1);
-by (resolve_tac [subsetI] 1);
-by (eresolve_tac [IntE] 1);
-by (resolve_tac [PowI] 1);
-by (REPEAT (dresolve_tac [PowD] 1));
-by (resolve_tac [Int_greatest] 1);
-by (REPEAT (assume_tac 1));
-choplev 0;
-by (fast_tac (ZF_cs addIs [equalityI]) 1);
-
-Goal "C<=D ==> Union(C) <= Union(D)";
-by (resolve_tac [subsetI] 1);
-by (eresolve_tac [UnionE] 1);
-by (resolve_tac [UnionI] 1);
-by (eresolve_tac [subsetD] 1);
-by (assume_tac 1);
-by (assume_tac 1);
-choplev 0;
-by (resolve_tac [Union_least] 1);
-by (resolve_tac [Union_upper] 1);
-by (eresolve_tac [subsetD] 1);
-
-
-val prems = goal ZF.thy
-    "[| a:A;  f: A->B;  g: C->D;  A Int C = 0 |] ==>  \
-\    (f Un g)`a = f`a";
-by (resolve_tac [apply_equality] 1);
-by (resolve_tac [UnI1] 1);
-by (resolve_tac [apply_Pair] 1);
-by (resolve_tac prems 1);
-by (resolve_tac prems 1);
-by (resolve_tac [fun_disjoint_Un] 1);
-by (resolve_tac prems 1);
-by (resolve_tac prems 1);
-by (resolve_tac prems 1);
-
-
-Goal "[| a:A;  f: A->B;  g: C->D;  A Int C = 0 |] ==>  \
-\     (f Un g)`a = f`a";
-by (resolve_tac [apply_equality] 1);
-by (resolve_tac [UnI1] 1);
-by (resolve_tac [apply_Pair] 1);
-by (assume_tac 1);
-by (assume_tac 1);
-by (resolve_tac [fun_disjoint_Un] 1);
-by (assume_tac 1);
-by (assume_tac 1);
-by (assume_tac 1);
-
-
-
-
-goal ZF.thy "f``(UN x:A. B(x)) = (UN x:A. f``B(x))";
-by (resolve_tac [equalityI] 1);
-by (resolve_tac [subsetI] 1);
-fe imageE;
-
-
-goal ZF.thy "(UN x:C. A(x) Int B) = (UN x:C. A(x))  Int  B";
-by (resolve_tac [equalityI] 1);
-by (resolve_tac [Int_greatest] 1);
-fr UN_mono;
-by (resolve_tac [Int_lower1] 1);
-fr UN_least;
-????
-
-
-> goal ZF.thy "Pow(A Int B) = Pow(A) Int Pow(B)";
-Level 0
-Pow(A Int B) = Pow(A) Int Pow(B)
- 1. Pow(A Int B) = Pow(A) Int Pow(B)
-> by (resolve_tac [equalityI] 1);
-Level 1
-Pow(A Int B) = Pow(A) Int Pow(B)
- 1. Pow(A Int B) <= Pow(A) Int Pow(B)
- 2. Pow(A) Int Pow(B) <= Pow(A Int B)
-> by (resolve_tac [Int_greatest] 1);
-Level 2
-Pow(A Int B) = Pow(A) Int Pow(B)
- 1. Pow(A Int B) <= Pow(A)
- 2. Pow(A Int B) <= Pow(B)
- 3. Pow(A) Int Pow(B) <= Pow(A Int B)
-> by (resolve_tac [Int_lower1 RS Pow_mono] 1);
-Level 3
-Pow(A Int B) = Pow(A) Int Pow(B)
- 1. Pow(A Int B) <= Pow(B)
- 2. Pow(A) Int Pow(B) <= Pow(A Int B)
-> by (resolve_tac [Int_lower2 RS Pow_mono] 1);
-Level 4
-Pow(A Int B) = Pow(A) Int Pow(B)
- 1. Pow(A) Int Pow(B) <= Pow(A Int B)
-> by (resolve_tac [subsetI] 1);
-Level 5
-Pow(A Int B) = Pow(A) Int Pow(B)
- 1. !!x. x : Pow(A) Int Pow(B) ==> x : Pow(A Int B)
-> by (eresolve_tac [IntE] 1);
-Level 6
-Pow(A Int B) = Pow(A) Int Pow(B)
- 1. !!x. [| x : Pow(A); x : Pow(B) |] ==> x : Pow(A Int B)
-> by (resolve_tac [PowI] 1);
-Level 7
-Pow(A Int B) = Pow(A) Int Pow(B)
- 1. !!x. [| x : Pow(A); x : Pow(B) |] ==> x <= A Int B
-> by (REPEAT (dresolve_tac [PowD] 1));
-Level 8
-Pow(A Int B) = Pow(A) Int Pow(B)
- 1. !!x. [| x <= A; x <= B |] ==> x <= A Int B
-> by (resolve_tac [Int_greatest] 1);
-Level 9
-Pow(A Int B) = Pow(A) Int Pow(B)
- 1. !!x. [| x <= A; x <= B |] ==> x <= A
- 2. !!x. [| x <= A; x <= B |] ==> x <= B
-> by (REPEAT (assume_tac 1));
-Level 10
-Pow(A Int B) = Pow(A) Int Pow(B)
-No subgoals!
-> choplev 0;
-Level 0
-Pow(A Int B) = Pow(A) Int Pow(B)
- 1. Pow(A Int B) = Pow(A) Int Pow(B)
-> by (fast_tac (ZF_cs addIs [equalityI]) 1);
-Level 1
-Pow(A Int B) = Pow(A) Int Pow(B)
-No subgoals!
-
-
-
-
-> val [prem] = goal ZF.thy "C<=D ==> Union(C) <= Union(D)";
-Level 0
-Union(C) <= Union(D)
- 1. Union(C) <= Union(D)
-> by (resolve_tac [subsetI] 1);
-Level 1
-Union(C) <= Union(D)
- 1. !!x. x : Union(C) ==> x : Union(D)
-> by (eresolve_tac [UnionE] 1);
-Level 2
-Union(C) <= Union(D)
- 1. !!x B. [| x : B; B : C |] ==> x : Union(D)
-> by (resolve_tac [UnionI] 1);
-Level 3
-Union(C) <= Union(D)
- 1. !!x B. [| x : B; B : C |] ==> ?B2(x,B) : D
- 2. !!x B. [| x : B; B : C |] ==> x : ?B2(x,B)
-> by (resolve_tac [prem RS subsetD] 1);
-Level 4
-Union(C) <= Union(D)
- 1. !!x B. [| x : B; B : C |] ==> ?B2(x,B) : C
- 2. !!x B. [| x : B; B : C |] ==> x : ?B2(x,B)
-> by (assume_tac 1);
-Level 5
-Union(C) <= Union(D)
- 1. !!x B. [| x : B; B : C |] ==> x : B
-> by (assume_tac 1);
-Level 6
-Union(C) <= Union(D)
-No subgoals!
-
-
-
-> val prems = goal ZF.thy
-#     "[| a:A;  f: A->B;  g: C->D;  A Int C = 0 |] ==>  \
-# \    (f Un g)`a = f`a";
-Level 0
-(f Un g) ` a = f ` a
- 1. (f Un g) ` a = f ` a
-> by (resolve_tac [apply_equality] 1);
-Level 1
-(f Un g) ` a = f ` a
- 1. <a,f ` a> : f Un g
- 2. f Un g : (PROD x:?A. ?B(x))
-> by (resolve_tac [UnI1] 1);
-Level 2
-(f Un g) ` a = f ` a
- 1. <a,f ` a> : f
- 2. f Un g : (PROD x:?A. ?B(x))
-> by (resolve_tac [apply_Pair] 1);
-Level 3
-(f Un g) ` a = f ` a
- 1. f : (PROD x:?A2. ?B2(x))
- 2. a : ?A2
- 3. f Un g : (PROD x:?A. ?B(x))
-> by (resolve_tac prems 1);
-Level 4
-(f Un g) ` a = f ` a
- 1. a : A
- 2. f Un g : (PROD x:?A. ?B(x))
-> by (resolve_tac prems 1);
-Level 5
-(f Un g) ` a = f ` a
- 1. f Un g : (PROD x:?A. ?B(x))
-> by (resolve_tac [fun_disjoint_Un] 1);
-Level 6
-(f Un g) ` a = f ` a
- 1. f : ?A3 -> ?B3
- 2. g : ?C3 -> ?D3
- 3. ?A3 Int ?C3 = 0
-> by (resolve_tac prems 1);
-Level 7
-(f Un g) ` a = f ` a
- 1. g : ?C3 -> ?D3
- 2. A Int ?C3 = 0
-> by (resolve_tac prems 1);
-Level 8
-(f Un g) ` a = f ` a
- 1. A Int C = 0
-> by (resolve_tac prems 1);
-Level 9
-(f Un g) ` a = f ` a
-No subgoals!
--- a/doc-src/Logics/ZF-rules.txt	Fri Jan 08 13:20:59 1999 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,468 +0,0 @@
-%%%% RULES.ML
-
-\idx{empty_set}    ~(x:0)
-\idx{union_iff}    A:Union(C) <-> (EX B:C. A:B)
-\idx{power_set}    A : Pow(B) <-> A <= B
-\idx{infinity}     0:Inf & (ALL y:Inf. succ(y): Inf)
-\idx{foundation}   A=0 | (EX x:A. ALL y:x. ~ y:A)
-
-\idx{replacement}  (!!x y z.[| x:A; P(x,y); P(x,z) |] ==> y=z) ==>
-                   y : PrimReplace(A,P) <-> (EX x:A. P(x,y))
-
-\idx{Replace_def}  Replace(A,P) == PrimReplace(A, %x y. (EX!z.P(x,z)) & P(x,y))
-\idx{RepFun_def}   RepFun(A,f) == Replace(A, %x u. u=f(x))
-\idx{Collect_def}  Collect(A,P) == \{ y . x:A, x=y & P(x)\}
-\idx{the_def}      The(P) == Union(\{y . x:\{0\}, P(y)\})
-
-\idx{Upair_def}    Upair(a,b) == 
-                   \{y. x:Pow(Pow(0)), (x=0 & y=a) | (x=Pow(0) & y=b)\}
-
-\idx{Inter_def}    Inter(A) == \{ x:Union(A) . ALL y:A. x:y\}
-
-\idx{Un_def}       A Un  B  == Union(Upair(A,B))
-\idx{Int_def}      A Int B  == Inter(Upair(A,B))
-\idx{Diff_def}     A - B    == \{ x:A . ~(x:B) \}
-\idx{cons_def}     cons(a,A) == Upair(a,a) Un A
-\idx{succ_def}     succ(i) == cons(i,i)
-
-\idx{Pair_def}     <a,b>  == \{\{a,a\}, \{a,b\}\}
-\idx{fst_def}      fst(A) == THE x. EX y. A=<x,y>
-\idx{snd_def}      snd(A) == THE y. EX x. A=<x,y>
-\idx{split_def}    split(p,c) == THE y. EX a b. p=<a,b> & y=c(a,b)
-\idx{Sigma_def}    Sigma(A,B) == UN x:A. UN y:B(x). \{<x,y>\}
-
-\idx{domain_def}   domain(r) == \{a:Union(Union(r)) . EX b. <a,b> : r\}
-\idx{range_def}    range(r) == \{b:Union(Union(r)) . EX a. <a,b> : r\}
-\idx{field_def}    field(r) == domain(r) Un range(r)
-\idx{image_def}    r``A == \{y : range(r) . EX x:A. <x,y> : r\}
-\idx{vimage_def}   r -`` A == \{x : domain(r) . EX y:A. <x,y> : r\}
-
-\idx{lam_def}      Lambda(A,f) == RepFun(A, %x. <x,f(x)>)
-\idx{apply_def}    f`a == THE y. <a,y> : f
-\idx{restrict_def} restrict(f,A) == lam x:A.f`x
-\idx{Pi_def}       Pi(A,B)  == \{f: Pow(Sigma(A,B)). ALL x:A. EX! y. <x,y>: f\}
-
-\idx{subset_def}         A <= B == ALL x:A. x:B
-\idx{strict_subset_def}  A <! B   == A <=B & ~(A=B)
-\idx{extension}          A = B <-> A <= B & B <= A
-
-\idx{Ball_def}   Ball(A,P) == ALL x. x:A --> P(x)
-\idx{Bex_def}    Bex(A,P) == EX x. x:A & P(x)
-
-
-%%%% LEMMAS.ML
-
-\idx{ballI}       [| !!x. x:A ==> P(x) |] ==> ALL x:A. P(x)
-\idx{bspec}       [| ALL x:A. P(x);  x: A |] ==> P(x)
-\idx{ballE}       [| ALL x:A. P(x);  P(x) ==> Q;  ~ x:A ==> Q |] ==> Q
-
-\idx{ball_cong}   [| A=A';  !!x. x:A' ==> P(x) <-> P'(x) |] ==> 
-            (ALL x:A. P(x)) <-> (ALL x:A'. P'(x))
-
-\idx{bexI}        [| P(x);  x: A |] ==> EX x:A. P(x)
-\idx{bexCI}       [| ALL x:A. ~P(x) ==> P(a);  a: A |] ==> EX x:A.P(x)
-\idx{bexE}        [| EX x:A. P(x);  !!x. [| x:A; P(x) |] ==> Q |] ==> Q
-
-\idx{bex_cong}    [| A=A';  !!x. x:A' ==> P(x) <-> P'(x) |] ==> 
-            (EX x:A. P(x)) <-> (EX x:A'. P'(x))
-
-\idx{subsetI}       (!!x.x:A ==> x:B) ==> A <= B
-\idx{subsetD}       [| A <= B;  c:A |] ==> c:B
-\idx{subsetCE}      [| A <= B;  ~(c:A) ==> P;  c:B ==> P |] ==> P
-\idx{subset_refl}   A <= A
-\idx{subset_trans}  [| A<=B;  B<=C |] ==> A<=C
-
-\idx{equalityI}     [| A <= B;  B <= A |] ==> A = B
-\idx{equalityD1}    A = B ==> A<=B
-\idx{equalityD2}    A = B ==> B<=A
-\idx{equalityE}     [| A = B;  [| A<=B; B<=A |] ==> P |]  ==>  P
-
-\idx{emptyE}          a:0 ==> P
-\idx{empty_subsetI}   0 <= A
-\idx{equals0I}        [| !!y. y:A ==> False |] ==> A=0
-\idx{equals0D}        [| A=0;  a:A |] ==> P
-
-\idx{PowI}            A <= B ==> A : Pow(B)
-\idx{PowD}            A : Pow(B)  ==>  A<=B
-
-\idx{ReplaceI}      [| x: A;  P(x,b);  !!y. P(x,y) ==> y=b |] ==> 
-              b : \{y. x:A, P(x,y)\}
-
-\idx{ReplaceE}      [| b : \{y. x:A, P(x,y)\};  
-                 !!x. [| x: A;  P(x,b);  ALL y. P(x,y)-->y=b |] ==> R 
-              |] ==> R
-
-\idx{Replace_cong}  [| A=B;  !!x y. x:B ==> P(x,y) <-> Q(x,y) |] ==> 
-              \{y. x:A, P(x,y)\} = \{y. x:B, Q(x,y)\}
-
-\idx{RepFunI}       [| a : A |] ==> f(a) : RepFun(A,f)
-\idx{RepFunE}       [| b : RepFun(A, %x.f(x));  
-                 !!x.[| x:A;  b=f(x) |] ==> P |] ==> P
-
-\idx{RepFun_cong}   [| A=B;  !!x. x:B ==> f(x)=g(x) |] ==> 
-              RepFun(A, %x.f(x)) = RepFun(B, %x.g(x))
-
-
-\idx{separation}     x : Collect(A,P) <-> x:A & P(x)
-\idx{CollectI}       [| a:A;  P(a) |] ==> a : \{x:A. P(x)\}
-\idx{CollectE}       [| a : \{x:A. P(x)\};  [| a:A; P(a) |] ==> R |] ==> R
-\idx{CollectD1}      a : \{x:A. P(x)\} ==> a:A
-\idx{CollectD2}      a : \{x:A. P(x)\} ==> P(a)
-
-\idx{Collect_cong}   [| A=B;  !!x. x:B ==> P(x) <-> Q(x) |] ==> 
-               \{x:A. P(x)\} = \{x:B. Q(x)\}
-
-\idx{UnionI}    [| B: C;  A: B |] ==> A: Union(C)
-\idx{UnionE}    [| A : Union(C);  !!B.[| A: B;  B: C |] ==> R |] ==> R
-
-\idx{InterI}    [| !!x. x: C ==> A: x;  c:C |] ==> A : Inter(C)
-\idx{InterD}    [| A : Inter(C);  B : C |] ==> A : B
-\idx{InterE}    [| A : Inter(C);  A:B ==> R;  ~ B:C ==> R |] ==> R
-
-\idx{UN_I}      [| a: A;  b: B(a) |] ==> b: (UN x:A. B(x))
-\idx{UN_E}      [| b : (UN x:A. B(x));  !!x.[| x: A;  b: B(x) |] ==> R |] ==> R
-
-\idx{INT_I}     [| !!x. x: A ==> b: B(x);  a: A |] ==> b: (INT x:A. B(x))
-\idx{INT_E}     [| b : (INT x:A. B(x));  a: A |] ==> b : B(a)
-
-
-%%%% UPAIR.ML
-
-\idx{pairing}      a:Upair(b,c) <-> (a=b | a=c)
-\idx{UpairI1}      a : Upair(a,b)
-\idx{UpairI2}      b : Upair(a,b)
-\idx{UpairE}       [| a : Upair(b,c);  a = b ==> P;  a = c ==> P |] ==> P
-
-\idx{UnI1}         c : A ==> c : A Un B
-\idx{UnI2}         c : B ==> c : A Un B
-\idx{UnCI}         (~c : B ==> c : A) ==> c : A Un B
-\idx{UnE}          [| c : A Un B;  c:A ==> P;  c:B ==> P |] ==> P
-
-\idx{IntI}         [| c : A;  c : B |] ==> c : A Int B
-\idx{IntD1}        c : A Int B ==> c : A
-\idx{IntD2}        c : A Int B ==> c : B
-\idx{IntE}         [| c : A Int B;  [| c:A; c:B |] ==> P |] ==> P
-
-\idx{DiffI}        [| c : A;  ~ c : B |] ==> c : A - B
-\idx{DiffD1}       c : A - B ==> c : A
-\idx{DiffD2}       [| c : A - B;  c : B |] ==> P
-\idx{DiffE}        [| c : A - B;  [| c:A; ~ c:B |] ==> P |] ==> P
-
-\idx{consI1}       a : cons(a,B)
-\idx{consI2}       a : B ==> a : cons(b,B)
-\idx{consCI}       (~ a:B ==> a=b) ==> a: cons(b,B)
-\idx{consE}        [| a : cons(b,A);  a=b ==> P;  a:A ==> P |] ==> P
-
-\idx{singletonI}   a : \{a\}
-\idx{singletonE}   [| a : \{b\}; a=b ==> P |] ==> P
-
-\idx{succI1}       i : succ(i)
-\idx{succI2}       i : j ==> i : succ(j)
-\idx{succCI}       (~ i:j ==> i=j) ==> i: succ(j)
-\idx{succE}        [| i : succ(j);  i=j ==> P;  i:j ==> P |] ==> P
-\idx{succ_neq_0}   [| succ(n)=0 |] ==> P
-\idx{succ_inject}  succ(m) = succ(n) ==> m=n
-
-\idx{the_equality}     [| P(a);  !!x. P(x) ==> x=a |] ==> (THE x. P(x)) = a
-\idx{theI}             EX! x. P(x) ==> P(THE x. P(x))
-
-\idx{mem_anti_sym}     [| a:b;  b:a |] ==> P
-\idx{mem_anti_refl}    a:a ==> P
-
-
-%%% SUBSET.ML
-
-\idx{Union_upper}       B:A ==> B <= Union(A)
-\idx{Union_least}       [| !!x. x:A ==> x<=C |] ==> Union(A) <= C
-
-\idx{Inter_lower}       B:A ==> Inter(A) <= B
-\idx{Inter_greatest}    [| a:A;  !!x. x:A ==> C<=x |] ==> C <= Inter(A)
-
-\idx{Un_upper1}         A <= A Un B
-\idx{Un_upper2}         B <= A Un B
-\idx{Un_least}          [| A<=C;  B<=C |] ==> A Un B <= C
-
-\idx{Int_lower1}        A Int B <= A
-\idx{Int_lower2}        A Int B <= B
-\idx{Int_greatest}      [| C<=A;  C<=B |] ==> C <= A Int B
-
-\idx{Diff_subset}       A-B <= A
-\idx{Diff_contains}     [| C<=A;  C Int B = 0 |] ==> C <= A-B
-
-\idx{Collect_subset}    Collect(A,P) <= A
-
-%%% PAIR.ML
-
-\idx{Pair_inject1}    <a,b> = <c,d> ==> a=c
-\idx{Pair_inject2}    <a,b> = <c,d> ==> b=d
-\idx{Pair_inject}     [| <a,b> = <c,d>;  [| a=c; b=d |] ==> P |] ==> P
-\idx{Pair_neq_0}      <a,b>=0 ==> P
-
-\idx{fst_conv}        fst(<a,b>) = a
-\idx{snd_conv}        snd(<a,b>) = b
-\idx{split_conv}      split(<a,b>, %x y.c(x,y)) = c(a,b)
-
-\idx{SigmaI}    [| a:A;  b:B(a) |] ==> <a,b> : (SUM x:A. B(x))
-
-\idx{SigmaE}    [| c: (SUM x:A. B(x));  
-             !!x y.[| x:A;  y:B(x);  c=<x,y> |] ==> P 
-          |] ==> P
-
-\idx{SigmaE2}   [| <a,b> : (SUM x:A. B(x));    
-             [| a:A;  b:B(a) |] ==> P   
-          |] ==> P
-
-
-%%% DOMRANGE.ML
-
-\idx{domainI}        <a,b>: r ==> a : domain(r)
-\idx{domainE}        [| a : domain(r);  !!y. <a,y>: r ==> P |] ==> P
-\idx{domain_subset}  domain(Sigma(A,B)) <= A
-
-\idx{rangeI}         <a,b>: r ==> b : range(r)
-\idx{rangeE}         [| b : range(r);  !!x. <x,b>: r ==> P |] ==> P
-\idx{range_subset}   range(A*B) <= B
-
-\idx{fieldI1}        <a,b>: r ==> a : field(r)
-\idx{fieldI2}        <a,b>: r ==> b : field(r)
-\idx{fieldCI}        (~ <c,a>:r ==> <a,b>: r) ==> a : field(r)
-
-\idx{fieldE}         [| a : field(r);  
-                  !!x. <a,x>: r ==> P;  
-                  !!x. <x,a>: r ==> P      
-               |] ==> P
-
-\idx{field_subset}   field(A*A) <= A
-
-\idx{imageI}         [| <a,b>: r;  a:A |] ==> b : r``A
-\idx{imageE}         [| b: r``A;  !!x.[| <x,b>: r;  x:A |] ==> P |] ==> P
-
-\idx{vimageI}        [| <a,b>: r;  b:B |] ==> a : r-``B
-\idx{vimageE}        [| a: r-``B;  !!x.[| <a,x>: r;  x:B |] ==> P |] ==> P
-
-
-%%% FUNC.ML
-
-\idx{fun_is_rel}       f: (PROD x:A.B(x)) ==> f <= Sigma(A,B)
-
-\idx{apply_equality}   [| <a,b>: f;  f: (PROD x:A.B(x)) |] ==> f`a = b
-\idx{apply_equality2}  [| <a,b>: f;  <a,c>: f;  f: (PROD x:A.B(x)) |] ==> b=c
-
-\idx{apply_type}       [| f: (PROD x:A.B(x));  a:A |] ==> f`a : B(a)
-\idx{apply_Pair}       [| f: (PROD x:A.B(x));  a:A |] ==> <a,f`a>: f
-\idx{apply_iff}        [| f: (PROD x:A.B(x));  a:A |] ==> <a,b>: f <-> f`a = b
-
-\idx{domain_type}      [| <a,b> : f;  f: (PROD x:A.B(x)) |] ==> a : A
-\idx{range_type}       [| <a,b> : f;  f: (PROD x:A.B(x)) |] ==> b : B(a)
-
-\idx{Pi_type}          [| f: A->C;  !!x. x:A ==> f`x : B(x) |] ==> f: Pi(A,B)
-\idx{domain_of_fun}    f : Pi(A,B) ==> domain(f)=A
-\idx{range_of_fun}     f : Pi(A,B) ==> f: A->range(f)
-
-\idx{fun_extension}    [| f : (PROD x:A.B(x));  g: (PROD x:A.D(x));  
-                    !!x. x:A ==> f`x = g`x       
-                 |] ==> f=g
-
-\idx{lamI}             a:A ==> <a,b(a)> : (lam x:A. b(x))
-\idx{lamE}             [| p: (lam x:A. b(x));  !!x.[| x:A; p=<x,b(x)> |] ==> P 
-                 |] ==>  P
-
-\idx{lam_type}         [| !!x. x:A ==> b(x): B(x) |] ==> 
-                 (lam x:A.b(x)) : (PROD x:A.B(x))
-
-\idx{beta_conv}        a : A ==> (lam x:A.b(x)) ` a = b(a)
-\idx{eta_conv}         f : (PROD x:A.B(x)) ==> (lam x:A. f`x) = f
-
-\idx{lam_theI}         (!!x. x:A ==> EX! y. Q(x,y)) ==> EX h. ALL x:A. Q(x, h`x)
-
-\idx{restrict_conv}          a : A ==> restrict(f,A) ` a = f`a
-\idx{restrict_type}          [| !!x. x:A ==> f`x: B(x) |] ==> 
-                       restrict(f,A) : (PROD x:A.B(x))
-
-\idx{fun_empty}              0: 0->0
-\idx{fun_single}             \{<a,b>\} : \{a\} -> \{b\}
-
-\idx{fun_disjoint_Un}        [| f: A->B;  g: C->D;  A Int C = 0  |] ==>  
-                       (f Un g) : (A Un C) -> (B Un D)
-
-\idx{fun_disjoint_apply1}    [| a:A;  f: A->B;  g: C->D;  A Int C = 0 |] ==>  
-                       (f Un g)`a = f`a
-
-\idx{fun_disjoint_apply2}    [| c:C;  f: A->B;  g: C->D;  A Int C = 0 |] ==>  
-                       (f Un g)`c = g`c
-
-
-%%% SIMPDATA.ML
-
-  a\in a 		& \bimp &  False\\
-  a\in \emptyset 	& \bimp &  False\\
-  a \in A \union B 	& \bimp &  a\in A \disj a\in B\\
-  a \in A \inter B 	& \bimp &  a\in A \conj a\in B\\
-  a \in A-B 		& \bimp &  a\in A \conj \neg (a\in B)\\
-  a \in {\tt cons}(b,B) & \bimp &  a=b \disj a\in B\\
-  i \in {\tt succ}(j) 	& \bimp &  i=j \disj i\in j\\
-  A\in \bigcup(C) 	& \bimp &  (\exists B. B\in C \conj A\in B)\\
-  A\in \bigcap(C) 	& \bimp &  (\forall B. B\in C \imp A\in B)
-	\qquad (\exists B. B\in C)\\
-  a \in {\tt Collect}(A,P) 	& \bimp &  a\in A \conj P(a)\\
-  b \in {\tt RepFun}(A,f) 	& \bimp &  (\exists x. x\in A \conj b=f(x))
-
-equalities.ML perm.ML plus.ML nat.ML
-----------------------------------------------------------------
-equalities.ML
-
-\idx{Int_absorb}         A Int A = A
-\idx{Int_commute}        A Int B = B Int A
-\idx{Int_assoc}          (A Int B) Int C  =  A Int (B Int C)
-\idx{Int_Un_distrib}     (A Un B) Int C  =  (A Int C) Un (B Int C)
-
-\idx{Un_absorb}          A Un A = A
-\idx{Un_commute}         A Un B = B Un A
-\idx{Un_assoc}           (A Un B) Un C  =  A Un (B Un C)
-\idx{Un_Int_distrib}     (A Int B) Un C  =  (A Un C) Int (B Un C)
-
-\idx{Diff_cancel}        A-A = 0
-\idx{Diff_disjoint}      A Int (B-A) = 0
-\idx{Diff_partition}     A<=B ==> A Un (B-A) = B
-\idx{double_complement}  [| A<=B; B<= C |] ==> (B - (C-A)) = A
-\idx{Diff_Un}            A - (B Un C) = (A-B) Int (A-C)
-\idx{Diff_Int}           A - (B Int C) = (A-B) Un (A-C)
-
-\idx{Union_Un_distrib}   Union(A Un B) = Union(A) Un Union(B)
-\idx{Inter_Un_distrib}   [| a:A;  b:B |] ==> 
-                   Inter(A Un B) = Inter(A) Int Inter(B)
-
-\idx{Int_Union_RepFun}   A Int Union(B) = (UN C:B. A Int C)
-
-\idx{Un_Inter_RepFun}    b:B ==> 
-                   A Un Inter(B) = (INT C:B. A Un C)
-
-\idx{SUM_Un_distrib1}    (SUM x:A Un B. C(x)) = 
-                   (SUM x:A. C(x)) Un (SUM x:B. C(x))
-
-\idx{SUM_Un_distrib2}    (SUM x:C. A(x) Un B(x)) =
-                   (SUM x:C. A(x)) Un (SUM x:C. B(x))
-
-\idx{SUM_Int_distrib1}   (SUM x:A Int B. C(x)) =
-                   (SUM x:A. C(x)) Int (SUM x:B. C(x))
-
-\idx{SUM_Int_distrib2}   (SUM x:C. A(x) Int B(x)) =
-                   (SUM x:C. A(x)) Int (SUM x:C. B(x))
-
-
-----------------------------------------------------------------
-perm.ML
-
-\idx{comp_def}
-        r O s == \{xz : domain(s)*range(r) . 
-                  EX x y z. xz=<x,z> & <x,y>:s & <y,z>:r\}),
-\idx{id_def}                    (*the identity function for A*)
-        id(A) == (lam x:A. x)),
-\idx{inj_def} 
-        inj(A,B) == 
-            \{ f: A->B. ALL w:A. ALL x:A. f`w=f`x --> w=x\}),
-\idx{surj_def} 
-        surj(A,B) == \{ f: A->B . ALL y:B. EX x:A. f`x=y\}),
-\idx{bij_def}
-        bij(A,B) == inj(A,B) Int surj(A,B))
-
-
-\idx{surj_is_fun}        f: surj(A,B) ==> f: A->B
-\idx{fun_is_surj}        f : Pi(A,B) ==> f: surj(A,range(f))
-
-\idx{inj_is_fun}         f: inj(A,B) ==> f: A->B
-\idx{inj_equality}       [| <a,b>:f;  <c,b>:f;  f: inj(A,B) |] ==> a=c
-
-\idx{bij_is_fun}         f: bij(A,B) ==> f: A->B
-
-\idx{inj_converse_surj}  f: inj(A,B) ==> converse(f): surj(range(f), A)
-
-\idx{left_inverse}       [| f: inj(A,B);  a: A |] ==> converse(f)`(f`a) = a
-\idx{right_inverse}      [| f: inj(A,B);  b: range(f) |] ==> 
-                   f`(converse(f)`b) = b
-
-\idx{inj_converse_inj}   f: inj(A,B) ==> converse(f): inj(range(f), A)
-\idx{bij_converse_bij}   f: bij(A,B) ==> converse(f): bij(B,A)
-
-\idx{comp_type}          [| s<=A*B;  r<=B*C |] ==> (r O s) <= A*C
-\idx{comp_assoc}         (r O s) O t = r O (s O t)
-
-\idx{left_comp_id}       r<=A*B ==> id(B) O r = r
-\idx{right_comp_id}      r<=A*B ==> r O id(A) = r
-
-\idx{comp_func}          [| g: A->B;  f: B->C |] ==> (f O g) : A->C
-\idx{comp_func_apply}    [| g: A->B;  f: B->C;  a:A |] ==> (f O g)`a = f`(g`a)
-
-\idx{comp_inj}      [| g: inj(A,B);   f: inj(B,C)  |] ==> (f O g) : inj(A,C)
-\idx{comp_surj}     [| g: surj(A,B);  f: surj(B,C) |] ==> (f O g) : surj(A,C)
-\idx{comp_bij}      [| g: bij(A,B);  f: bij(B,C) |] ==> (f O g) : bij(A,C)
-
-\idx{left_comp_inverse}     f: inj(A,B) ==> converse(f) O f = id(A)
-\idx{right_comp_inverse}    f: surj(A,B) ==> f O converse(f) = id(B)
-
-\idx{bij_disjoint_Un}   
-    [| f: bij(A,B);  g: bij(C,D);  A Int C = 0;  B Int D = 0 |] ==> 
-    (f Un g) : bij(A Un C, B Un D)
-
-\idx{restrict_bij}  [| f: inj(A,B);  C<=A |] ==> restrict(f,C): bij(C, f``C)
-
-
-----------------------------------------------------------------
-plus.ML
-
-\idx{plus_def}      A+B == \{0\}*A Un \{\{0\}\}*B
-\idx{Inl_def}       Inl(a) == < 0 ,a>
-\idx{Inr_def}       Inr(b) == <\{0\},b>
-\idx{when_def}      when(u,c,d) == 
-                THE y. EX z.(u=Inl(z) & y=c(z)) | (u=Inr(z) & y=d(z))
-
-\idx{plus_InlI}     a : A ==> Inl(a) : A+B
-\idx{plus_InrI}     b : B ==> Inr(b) : A+B
-
-\idx{Inl_inject}    Inl(a) = Inl(b) ==>  a=b
-\idx{Inr_inject}    Inr(a) = Inr(b) ==> a=b
-\idx{Inl_neq_Inr}   Inl(a)=Inr(b) ==> P
-
-\idx{plusE2}        u: A+B ==> (EX x. x:A & u=Inl(x)) | (EX y. y:B & u=Inr(y))
-
-\idx{when_Inl_conv} when(Inl(a),c,d) = c(a)
-\idx{when_Inr_conv} when(Inr(b),c,d) = d(b)
-
-\idx{when_type}     [| u: A+B; 
-                 !!x. x: A ==> c(x): C(Inl(x));   
-                 !!y. y: B ==> d(y): C(Inr(y)) 
-              |] ==> when(u,c,d) : C(u)
-
-
-----------------------------------------------------------------
-nat.ML
-
-
-\idx{nat_def}       nat == lfp(lam r: Pow(Inf). \{0\} Un RepFun(r,succ))
-\idx{nat_case_def}  nat_case(n,a,b) == 
-                THE y. n=0 & y=a | (EX x. n=succ(x) & y=b(x))
-\idx{nat_rec_def}   nat_rec(k,a,b) == 
-                transrec(nat, k, %n f. nat_case(n, a, %m. b(m, f`m)))
-
-\idx{nat_0_I}       0 : nat
-\idx{nat_succ_I}    n : nat ==> succ(n) : nat
-
-\idx{nat_induct}        
-    [| n: nat;  P(0);  !!x. [| x: nat;  P(x) |] ==> P(succ(x)) 
-    |] ==> P(n)
-
-\idx{nat_case_0_conv}       nat_case(0,a,b) = a
-\idx{nat_case_succ_conv}    nat_case(succ(m),a,b) = b(m)
-
-\idx{nat_case_type}     
-    [| n: nat;  a: C(0);  !!m. m: nat ==> b(m): C(succ(m))  
-    |] ==> nat_case(n,a,b) : C(n)
-
-\idx{nat_rec_0_conv}        nat_rec(0,a,b) = a
-\idx{nat_rec_succ_conv}     m: nat ==> nat_rec(succ(m),a,b) = b(m, nat_rec(m,a,b))
-
-\idx{nat_rec_type}      
-    [| n: nat;  
-       a: C(0);  
-       !!m z. [| m: nat;  z: C(m) |] ==> b(m,z): C(succ(m))  
-    |] ==> nat_rec(n,a,b) : C(n)
--- a/doc-src/Logics/ZF.tex	Fri Jan 08 13:20:59 1999 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,1892 +0,0 @@
-%% $Id$
-\chapter{Zermelo-Fraenkel Set Theory}
-\index{set theory|(}
-
-The theory~\thydx{ZF} implements Zermelo-Fraenkel set
-theory~\cite{halmos60,suppes72} as an extension of~\texttt{FOL}, classical
-first-order logic.  The theory includes a collection of derived natural
-deduction rules, for use with Isabelle's classical reasoner.  Much
-of it is based on the work of No\"el~\cite{noel}.
-
-A tremendous amount of set theory has been formally developed, including
-the basic properties of relations, functions, ordinals and cardinals.
-Significant results have been proved, such as the Schr\"oder-Bernstein
-Theorem, the Wellordering Theorem and a version of Ramsey's Theorem.
-General methods have been developed for solving recursion equations over
-monotonic functors; these have been applied to yield constructions of
-lists, trees, infinite lists, etc.  The Recursion Theorem has been proved,
-admitting recursive definitions of functions over well-founded relations.
-Thus, we may even regard set theory as a computational logic, loosely
-inspired by Martin-L\"of's Type Theory.
-
-Because {\ZF} is an extension of {\FOL}, it provides the same
-packages, namely \texttt{hyp_subst_tac}, the simplifier, and the
-classical reasoner.  The default simpset and claset are usually
-satisfactory.  Named simpsets include \ttindexbold{ZF_ss} (basic set
-theory rules) and \ttindexbold{rank_ss} (for proving termination of
-well-founded recursion).  Named clasets include \ttindexbold{ZF_cs}
-(basic set theory) and \ttindexbold{le_cs} (useful for reasoning about
-the relations $<$ and $\le$).
-
-\texttt{ZF} has a flexible package for handling inductive definitions,
-such as inference systems, and datatype definitions, such as lists and
-trees.  Moreover it handles coinductive definitions, such as
-bisimulation relations, and codatatype definitions, such as streams.
-There is a paper \cite{paulson-CADE} describing the package, but its
-examples use an obsolete declaration syntax.  Please consult the
-version of the paper distributed with Isabelle.
-
-Recent reports~\cite{paulson-set-I,paulson-set-II} describe \texttt{ZF} less
-formally than this chapter.  Isabelle employs a novel treatment of
-non-well-founded data structures within the standard {\sc zf} axioms, including
-the Axiom of Foundation~\cite{paulson-final}.
-
-
-\section{Which version of axiomatic set theory?}
-The two main axiom systems for set theory are Bernays-G\"odel~({\sc bg})
-and Zermelo-Fraenkel~({\sc zf}).  Resolution theorem provers can use {\sc
-  bg} because it is finite~\cite{boyer86,quaife92}.  {\sc zf} does not
-have a finite axiom system because of its Axiom Scheme of Replacement.
-This makes it awkward to use with many theorem provers, since instances
-of the axiom scheme have to be invoked explicitly.  Since Isabelle has no
-difficulty with axiom schemes, we may adopt either axiom system.
-
-These two theories differ in their treatment of {\bf classes}, which are
-collections that are `too big' to be sets.  The class of all sets,~$V$,
-cannot be a set without admitting Russell's Paradox.  In {\sc bg}, both
-classes and sets are individuals; $x\in V$ expresses that $x$ is a set.  In
-{\sc zf}, all variables denote sets; classes are identified with unary
-predicates.  The two systems define essentially the same sets and classes,
-with similar properties.  In particular, a class cannot belong to another
-class (let alone a set).
-
-Modern set theorists tend to prefer {\sc zf} because they are mainly concerned
-with sets, rather than classes.  {\sc bg} requires tiresome proofs that various
-collections are sets; for instance, showing $x\in\{x\}$ requires showing that
-$x$ is a set.
-
-
-\begin{figure} \small
-\begin{center}
-\begin{tabular}{rrr} 
-  \it name      &\it meta-type  & \it description \\ 
-  \cdx{Let}     & $[\alpha,\alpha\To\beta]\To\beta$ & let binder\\
-  \cdx{0}       & $i$           & empty set\\
-  \cdx{cons}    & $[i,i]\To i$  & finite set constructor\\
-  \cdx{Upair}   & $[i,i]\To i$  & unordered pairing\\
-  \cdx{Pair}    & $[i,i]\To i$  & ordered pairing\\
-  \cdx{Inf}     & $i$   & infinite set\\
-  \cdx{Pow}     & $i\To i$      & powerset\\
-  \cdx{Union} \cdx{Inter} & $i\To i$    & set union/intersection \\
-  \cdx{split}   & $[[i,i]\To i, i] \To i$ & generalized projection\\
-  \cdx{fst} \cdx{snd}   & $i\To i$      & projections\\
-  \cdx{converse}& $i\To i$      & converse of a relation\\
-  \cdx{succ}    & $i\To i$      & successor\\
-  \cdx{Collect} & $[i,i\To o]\To i$     & separation\\
-  \cdx{Replace} & $[i, [i,i]\To o] \To i$       & replacement\\
-  \cdx{PrimReplace} & $[i, [i,i]\To o] \To i$   & primitive replacement\\
-  \cdx{RepFun}  & $[i, i\To i] \To i$   & functional replacement\\
-  \cdx{Pi} \cdx{Sigma}  & $[i,i\To i]\To i$     & general product/sum\\
-  \cdx{domain}  & $i\To i$      & domain of a relation\\
-  \cdx{range}   & $i\To i$      & range of a relation\\
-  \cdx{field}   & $i\To i$      & field of a relation\\
-  \cdx{Lambda}  & $[i, i\To i]\To i$    & $\lambda$-abstraction\\
-  \cdx{restrict}& $[i, i] \To i$        & restriction of a function\\
-  \cdx{The}     & $[i\To o]\To i$       & definite description\\
-  \cdx{if}      & $[o,i,i]\To i$        & conditional\\
-  \cdx{Ball} \cdx{Bex}  & $[i, i\To o]\To o$    & bounded quantifiers
-\end{tabular}
-\end{center}
-\subcaption{Constants}
-
-\begin{center}
-\index{*"`"` symbol}
-\index{*"-"`"` symbol}
-\index{*"` symbol}\index{function applications!in \ZF}
-\index{*"- symbol}
-\index{*": symbol}
-\index{*"<"= symbol}
-\begin{tabular}{rrrr} 
-  \it symbol  & \it meta-type & \it priority & \it description \\ 
-  \tt ``        & $[i,i]\To i$  &  Left 90      & image \\
-  \tt -``       & $[i,i]\To i$  &  Left 90      & inverse image \\
-  \tt `         & $[i,i]\To i$  &  Left 90      & application \\
-  \sdx{Int}     & $[i,i]\To i$  &  Left 70      & intersection ($\int$) \\
-  \sdx{Un}      & $[i,i]\To i$  &  Left 65      & union ($\un$) \\
-  \tt -         & $[i,i]\To i$  &  Left 65      & set difference ($-$) \\[1ex]
-  \tt:          & $[i,i]\To o$  &  Left 50      & membership ($\in$) \\
-  \tt <=        & $[i,i]\To o$  &  Left 50      & subset ($\subseteq$) 
-\end{tabular}
-\end{center}
-\subcaption{Infixes}
-\caption{Constants of {\ZF}} \label{zf-constants}
-\end{figure} 
-
-
-\section{The syntax of set theory}
-The language of set theory, as studied by logicians, has no constants.  The
-traditional axioms merely assert the existence of empty sets, unions,
-powersets, etc.; this would be intolerable for practical reasoning.  The
-Isabelle theory declares constants for primitive sets.  It also extends
-\texttt{FOL} with additional syntax for finite sets, ordered pairs,
-comprehension, general union/intersection, general sums/products, and
-bounded quantifiers.  In most other respects, Isabelle implements precisely
-Zermelo-Fraenkel set theory.
-
-Figure~\ref{zf-constants} lists the constants and infixes of~\ZF, while
-Figure~\ref{zf-trans} presents the syntax translations.  Finally,
-Figure~\ref{zf-syntax} presents the full grammar for set theory, including
-the constructs of \FOL.
-
-Local abbreviations can be introduced by a \texttt{let} construct whose
-syntax appears in Fig.\ts\ref{zf-syntax}.  Internally it is translated into
-the constant~\cdx{Let}.  It can be expanded by rewriting with its
-definition, \tdx{Let_def}.
-
-Apart from \texttt{let}, set theory does not use polymorphism.  All terms in
-{\ZF} have type~\tydx{i}, which is the type of individuals and has class~{\tt
-  term}.  The type of first-order formulae, remember, is~\textit{o}.
-
-Infix operators include binary union and intersection ($A\un B$ and
-$A\int B$), set difference ($A-B$), and the subset and membership
-relations.  Note that $a$\verb|~:|$b$ is translated to $\neg(a\in b)$.  The
-union and intersection operators ($\bigcup A$ and $\bigcap A$) form the
-union or intersection of a set of sets; $\bigcup A$ means the same as
-$\bigcup@{x\in A}x$.  Of these operators, only $\bigcup A$ is primitive.
-
-The constant \cdx{Upair} constructs unordered pairs; thus {\tt
-  Upair($A$,$B$)} denotes the set~$\{A,B\}$ and \texttt{Upair($A$,$A$)}
-denotes the singleton~$\{A\}$.  General union is used to define binary
-union.  The Isabelle version goes on to define the constant
-\cdx{cons}:
-\begin{eqnarray*}
-   A\cup B              & \equiv &       \bigcup(\texttt{Upair}(A,B)) \\
-   \texttt{cons}(a,B)      & \equiv &        \texttt{Upair}(a,a) \un B
-\end{eqnarray*}
-The $\{a@1, \ldots\}$ notation abbreviates finite sets constructed in the
-obvious manner using~\texttt{cons} and~$\emptyset$ (the empty set):
-\begin{eqnarray*}
- \{a,b,c\} & \equiv & \texttt{cons}(a,\texttt{cons}(b,\texttt{cons}(c,\emptyset)))
-\end{eqnarray*}
-
-The constant \cdx{Pair} constructs ordered pairs, as in {\tt
-Pair($a$,$b$)}.  Ordered pairs may also be written within angle brackets,
-as {\tt<$a$,$b$>}.  The $n$-tuple {\tt<$a@1$,\ldots,$a@{n-1}$,$a@n$>}
-abbreviates the nest of pairs\par\nobreak
-\centerline{\texttt{Pair($a@1$,\ldots,Pair($a@{n-1}$,$a@n$)\ldots).}}
-
-In {\ZF}, a function is a set of pairs.  A {\ZF} function~$f$ is simply an
-individual as far as Isabelle is concerned: its Isabelle type is~$i$, not
-say $i\To i$.  The infix operator~{\tt`} denotes the application of a
-function set to its argument; we must write~$f{\tt`}x$, not~$f(x)$.  The
-syntax for image is~$f{\tt``}A$ and that for inverse image is~$f{\tt-``}A$.
-
-
-\begin{figure} 
-\index{lambda abs@$\lambda$-abstractions!in \ZF}
-\index{*"-"> symbol}
-\index{*"* symbol}
-\begin{center} \footnotesize\tt\frenchspacing
-\begin{tabular}{rrr} 
-  \it external          & \it internal  & \it description \\ 
-  $a$ \ttilde: $b$      & \ttilde($a$ : $b$)    & \rm negated membership\\
-  \ttlbrace$a@1$, $\ldots$, $a@n$\ttrbrace  &  cons($a@1$,$\ldots$,cons($a@n$,0)) &
-        \rm finite set \\
-  <$a@1$, $\ldots$, $a@{n-1}$, $a@n$> & 
-        Pair($a@1$,\ldots,Pair($a@{n-1}$,$a@n$)\ldots) &
-        \rm ordered $n$-tuple \\
-  \ttlbrace$x$:$A . P[x]$\ttrbrace    &  Collect($A$,$\lambda x. P[x]$) &
-        \rm separation \\
-  \ttlbrace$y . x$:$A$, $Q[x,y]$\ttrbrace  &  Replace($A$,$\lambda x\,y. Q[x,y]$) &
-        \rm replacement \\
-  \ttlbrace$b[x] . x$:$A$\ttrbrace  &  RepFun($A$,$\lambda x. b[x]$) &
-        \rm functional replacement \\
-  \sdx{INT} $x$:$A . B[x]$      & Inter(\ttlbrace$B[x] . x$:$A$\ttrbrace) &
-        \rm general intersection \\
-  \sdx{UN}  $x$:$A . B[x]$      & Union(\ttlbrace$B[x] . x$:$A$\ttrbrace) &
-        \rm general union \\
-  \sdx{PROD} $x$:$A . B[x]$     & Pi($A$,$\lambda x. B[x]$) & 
-        \rm general product \\
-  \sdx{SUM}  $x$:$A . B[x]$     & Sigma($A$,$\lambda x. B[x]$) & 
-        \rm general sum \\
-  $A$ -> $B$            & Pi($A$,$\lambda x. B$) & 
-        \rm function space \\
-  $A$ * $B$             & Sigma($A$,$\lambda x. B$) & 
-        \rm binary product \\
-  \sdx{THE}  $x . P[x]$ & The($\lambda x. P[x]$) & 
-        \rm definite description \\
-  \sdx{lam}  $x$:$A . b[x]$     & Lambda($A$,$\lambda x. b[x]$) & 
-        \rm $\lambda$-abstraction\\[1ex]
-  \sdx{ALL} $x$:$A . P[x]$      & Ball($A$,$\lambda x. P[x]$) & 
-        \rm bounded $\forall$ \\
-  \sdx{EX}  $x$:$A . P[x]$      & Bex($A$,$\lambda x. P[x]$) & 
-        \rm bounded $\exists$
-\end{tabular}
-\end{center}
-\caption{Translations for {\ZF}} \label{zf-trans}
-\end{figure} 
-
-
-\begin{figure} 
-\index{*let symbol}
-\index{*in symbol}
-\dquotes
-\[\begin{array}{rcl}
-    term & = & \hbox{expression of type~$i$} \\
-         & | & "let"~id~"="~term";"\dots";"~id~"="~term~"in"~term \\
-         & | & "{\ttlbrace} " term\; ("," term)^* " {\ttrbrace}" \\
-         & | & "< "  term\; ("," term)^* " >"  \\
-         & | & "{\ttlbrace} " id ":" term " . " formula " {\ttrbrace}" \\
-         & | & "{\ttlbrace} " id " . " id ":" term ", " formula " {\ttrbrace}" \\
-         & | & "{\ttlbrace} " term " . " id ":" term " {\ttrbrace}" \\
-         & | & term " `` " term \\
-         & | & term " -`` " term \\
-         & | & term " ` " term \\
-         & | & term " * " term \\
-         & | & term " Int " term \\
-         & | & term " Un " term \\
-         & | & term " - " term \\
-         & | & term " -> " term \\
-         & | & "THE~~"  id  " . " formula\\
-         & | & "lam~~"  id ":" term " . " term \\
-         & | & "INT~~"  id ":" term " . " term \\
-         & | & "UN~~~"  id ":" term " . " term \\
-         & | & "PROD~"  id ":" term " . " term \\
-         & | & "SUM~~"  id ":" term " . " term \\[2ex]
- formula & = & \hbox{expression of type~$o$} \\
-         & | & term " : " term \\
-         & | & term " \ttilde: " term \\
-         & | & term " <= " term \\
-         & | & term " = " term \\
-         & | & term " \ttilde= " term \\
-         & | & "\ttilde\ " formula \\
-         & | & formula " \& " formula \\
-         & | & formula " | " formula \\
-         & | & formula " --> " formula \\
-         & | & formula " <-> " formula \\
-         & | & "ALL " id ":" term " . " formula \\
-         & | & "EX~~" id ":" term " . " formula \\
-         & | & "ALL~" id~id^* " . " formula \\
-         & | & "EX~~" id~id^* " . " formula \\
-         & | & "EX!~" id~id^* " . " formula
-  \end{array}
-\]
-\caption{Full grammar for {\ZF}} \label{zf-syntax}
-\end{figure} 
-
-
-\section{Binding operators}
-The constant \cdx{Collect} constructs sets by the principle of {\bf
-  separation}.  The syntax for separation is
-\hbox{\tt\ttlbrace$x$:$A$. $P[x]$\ttrbrace}, where $P[x]$ is a formula
-that may contain free occurrences of~$x$.  It abbreviates the set {\tt
-  Collect($A$,$\lambda x. P[x]$)}, which consists of all $x\in A$ that
-satisfy~$P[x]$.  Note that \texttt{Collect} is an unfortunate choice of
-name: some set theories adopt a set-formation principle, related to
-replacement, called collection.
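-For instance, separating from~$A$ those elements that are subsets of~$B$
-gives a familiar set:
-\[ \{x\in A. x\subseteq B\} \;=\; A\int {\tt Pow}(B). \]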
-
-The constant \cdx{Replace} constructs sets by the principle of {\bf
-  replacement}.  The syntax
-\hbox{\tt\ttlbrace$y$. $x$:$A$,$Q[x,y]$\ttrbrace} denotes the set {\tt
-  Replace($A$,$\lambda x\,y. Q[x,y]$)}, which consists of all~$y$ such
-that there exists $x\in A$ satisfying~$Q[x,y]$.  The Replacement Axiom
-has the condition that $Q$ must be single-valued over~$A$: for
-all~$x\in A$ there exists at most one $y$ satisfying~$Q[x,y]$.  A
-single-valued binary predicate is also called a {\bf class function}.
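-For instance, taking $Q[x,y]$ to be $x=\pair{y,y}$ is legitimate, since
-ordered pairs are injective; the resulting set
-\[ \{y. x\in A, x=\pair{y,y}\} \]
-consists of all~$y$ such that $\pair{y,y}\in A$.  For many $x\in A$ there
-may be no suitable~$y$ at all: a class function need not be total.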
-
-The constant \cdx{RepFun} expresses a special case of replacement,
-where $Q[x,y]$ has the form $y=b[x]$.  Such a $Q$ is trivially
-single-valued, since it is just the graph of the meta-level
-function~$\lambda x. b[x]$.  The resulting set consists of all $b[x]$
-for~$x\in A$.  This is analogous to the \ML{} functional \texttt{map},
-since it applies a function to every element of a set.  The syntax is
-\hbox{\tt\ttlbrace$b[x]$. $x$:$A$\ttrbrace}, which expands to {\tt
-  RepFun($A$,$\lambda x. b[x]$)}.
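-For instance, the set of successors of elements of~$A$ may be written
-\begin{ttbox}
-{\ttlbrace}succ(x). x:A{\ttrbrace}   {\rm for}   RepFun(A, \%x. succ(x))
-\end{ttbox}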
-
-\index{*INT symbol}\index{*UN symbol} 
-General unions and intersections of indexed
-families of sets, namely $\bigcup@{x\in A}B[x]$ and $\bigcap@{x\in A}B[x]$,
-are written \hbox{\tt UN $x$:$A$. $B[x]$} and \hbox{\tt INT $x$:$A$. $B[x]$}.
-Their meaning is expressed using \texttt{RepFun} as
-\[
-\bigcup(\{B[x]. x\in A\}) \qquad\hbox{and}\qquad 
-\bigcap(\{B[x]. x\in A\}). 
-\]
-General sums $\sum@{x\in A}B[x]$ and products $\prod@{x\in A}B[x]$ can be
-constructed in set theory, where $B[x]$ is a family of sets over~$A$.  They
-have as special cases $A\times B$ and $A\to B$, where $B$ is simply a set.
-This is similar to the situation in Constructive Type Theory (set theory
-has `dependent sets') and calls for similar syntactic conventions.  The
-constants~\cdx{Sigma} and~\cdx{Pi} construct general sums and
-products.  Instead of \texttt{Sigma($A$,$B$)} and \texttt{Pi($A$,$B$)} we may write
-\hbox{\tt SUM $x$:$A$. $B[x]$} and \hbox{\tt PROD $x$:$A$. $B[x]$}.  
-\index{*SUM symbol}\index{*PROD symbol}%
-The special cases \hbox{\tt$A$*$B$} and \hbox{\tt$A$->$B$} abbreviate
-general sums and products over a constant family.\footnote{Unlike normal
-infix operators, {\tt*} and {\tt->} merely define abbreviations; there are
-no constants~{\tt op~*} and~\hbox{\tt op~->}.} Isabelle accepts these
-abbreviations in parsing and uses them whenever possible for printing.
-
-\index{*THE symbol} 
-As mentioned above, whenever the axioms assert the existence and uniqueness
-of a set, Isabelle's set theory declares a constant for that set.  These
-constants can express the {\bf definite description} operator~$\iota
-x. P[x]$, which stands for the unique~$a$ satisfying~$P[a]$, if such exists.
-Since all terms in {\ZF} denote something, a description is always
-meaningful, but we do not know its value unless $P[x]$ defines it uniquely.
-Using the constant~\cdx{The}, we may write descriptions as {\tt
-  The($\lambda x. P[x]$)} or use the syntax \hbox{\tt THE $x$. $P[x]$}.
-
-\index{*lam symbol}
-Function sets may be written in $\lambda$-notation; $\lambda x\in A. b[x]$
-stands for the set of all pairs $\pair{x,b[x]}$ for $x\in A$.  In order for
-this to be a set, the function's domain~$A$ must be given.  Using the
-constant~\cdx{Lambda}, we may express function sets as {\tt
-Lambda($A$,$\lambda x. b[x]$)} or use the syntax \hbox{\tt lam $x$:$A$. $b[x]$}.
-
-Isabelle's set theory defines two {\bf bounded quantifiers}:
-\begin{eqnarray*}
-   \forall x\in A. P[x] &\hbox{abbreviates}& \forall x. x\in A\imp P[x] \\
-   \exists x\in A. P[x] &\hbox{abbreviates}& \exists x. x\in A\conj P[x]
-\end{eqnarray*}
-The constants~\cdx{Ball} and~\cdx{Bex} are defined
-accordingly.  Instead of \texttt{Ball($A$,$P$)} and \texttt{Bex($A$,$P$)} we may
-write
-\hbox{\tt ALL $x$:$A$. $P[x]$} and \hbox{\tt EX $x$:$A$. $P[x]$}.
-
-
-%%%% ZF.thy
-
-\begin{figure}
-\begin{ttbox}
-\tdx{Let_def}            Let(s, f) == f(s)
-
-\tdx{Ball_def}           Ball(A,P) == ALL x. x:A --> P(x)
-\tdx{Bex_def}            Bex(A,P)  == EX x. x:A & P(x)
-
-\tdx{subset_def}         A <= B  == ALL x:A. x:B
-\tdx{extension}          A = B  <->  A <= B & B <= A
-
-\tdx{Union_iff}          A : Union(C) <-> (EX B:C. A:B)
-\tdx{Pow_iff}            A : Pow(B) <-> A <= B
-\tdx{foundation}         A=0 | (EX x:A. ALL y:x. ~ y:A)
-
-\tdx{replacement}        (ALL x:A. ALL y z. P(x,y) & P(x,z) --> y=z) ==>
-                   b : PrimReplace(A,P) <-> (EX x:A. P(x,b))
-\subcaption{The Zermelo-Fraenkel Axioms}
-
-\tdx{Replace_def}  Replace(A,P) == 
-                   PrimReplace(A, \%x y. (EX!z. P(x,z)) & P(x,y))
-\tdx{RepFun_def}   RepFun(A,f)  == {\ttlbrace}y . x:A, y=f(x)\ttrbrace
-\tdx{the_def}      The(P)       == Union({\ttlbrace}y . x:{\ttlbrace}0{\ttrbrace}, P(y){\ttrbrace})
-\tdx{if_def}       if(P,a,b)    == THE z. P & z=a | ~P & z=b
-\tdx{Collect_def}  Collect(A,P) == {\ttlbrace}y . x:A, x=y & P(x){\ttrbrace}
-\tdx{Upair_def}    Upair(a,b)   == 
-                 {\ttlbrace}y. x:Pow(Pow(0)), (x=0 & y=a) | (x=Pow(0) & y=b){\ttrbrace}
-\subcaption{Consequences of replacement}
-
-\tdx{Inter_def}    Inter(A) == {\ttlbrace}x:Union(A) . ALL y:A. x:y{\ttrbrace}
-\tdx{Un_def}       A Un  B  == Union(Upair(A,B))
-\tdx{Int_def}      A Int B  == Inter(Upair(A,B))
-\tdx{Diff_def}     A - B    == {\ttlbrace}x:A . x~:B{\ttrbrace}
-\subcaption{Union, intersection, difference}
-\end{ttbox}
-\caption{Rules and axioms of {\ZF}} \label{zf-rules}
-\end{figure}
-
-
-\begin{figure}
-\begin{ttbox}
-\tdx{cons_def}     cons(a,A) == Upair(a,a) Un A
-\tdx{succ_def}     succ(i) == cons(i,i)
-\tdx{infinity}     0:Inf & (ALL y:Inf. succ(y): Inf)
-\subcaption{Finite and infinite sets}
-
-\tdx{Pair_def}       <a,b>      == {\ttlbrace}{\ttlbrace}a,a{\ttrbrace}, {\ttlbrace}a,b{\ttrbrace}{\ttrbrace}
-\tdx{split_def}      split(c,p) == THE y. EX a b. p=<a,b> & y=c(a,b)
-\tdx{fst_def}        fst(p)     == split(\%x y. x, p)
-\tdx{snd_def}        snd(p)     == split(\%x y. y, p)
-\tdx{Sigma_def}      Sigma(A,B) == UN x:A. UN y:B(x). {\ttlbrace}<x,y>{\ttrbrace}
-\subcaption{Ordered pairs and Cartesian products}
-
-\tdx{converse_def}   converse(r) == {\ttlbrace}z. w:r, EX x y. w=<x,y> & z=<y,x>{\ttrbrace}
-\tdx{domain_def}     domain(r)   == {\ttlbrace}x. w:r, EX y. w=<x,y>{\ttrbrace}
-\tdx{range_def}      range(r)    == domain(converse(r))
-\tdx{field_def}      field(r)    == domain(r) Un range(r)
-\tdx{image_def}      r `` A      == {\ttlbrace}y : range(r) . EX x:A. <x,y> : r{\ttrbrace}
-\tdx{vimage_def}     r -`` A     == converse(r)``A
-\subcaption{Operations on relations}
-
-\tdx{lam_def}    Lambda(A,b) == {\ttlbrace}<x,b(x)> . x:A{\ttrbrace}
-\tdx{apply_def}  f`a         == THE y. <a,y> : f
-\tdx{Pi_def}     Pi(A,B) == {\ttlbrace}f: Pow(Sigma(A,B)). ALL x:A. EX! y. <x,y>: f{\ttrbrace}
-\tdx{restrict_def}   restrict(f,A) == lam x:A. f`x
-\subcaption{Functions and general product}
-\end{ttbox}
-\caption{Further definitions of {\ZF}} \label{zf-defs}
-\end{figure}
-
-
-
-\section{The Zermelo-Fraenkel axioms}
-The axioms appear in Fig.\ts \ref{zf-rules}.  They resemble those
-presented by Suppes~\cite{suppes72}.  Most of the theory consists of
-definitions.  In particular, bounded quantifiers and the subset relation
-appear in other axioms.  Object-level quantifiers and implications have
-been replaced by meta-level ones wherever possible, to simplify use of the
-axioms.  See the file \texttt{ZF/ZF.thy} for details.
-
-The traditional replacement axiom asserts
-\[ y \in \texttt{PrimReplace}(A,P) \bimp (\exists x\in A. P(x,y)) \]
-subject to the condition that $P(x,y)$ is single-valued for all~$x\in A$.
-The Isabelle theory defines \cdx{Replace} to apply
-\cdx{PrimReplace} to the single-valued part of~$P$, namely
-\[ (\exists!z. P(x,z)) \conj P(x,y). \]
-Thus $y\in \texttt{Replace}(A,P)$ if and only if there is some~$x$ such that
-$P(x,-)$ holds uniquely for~$y$.  Because the equivalence is unconditional,
-\texttt{Replace} is much easier to use than \texttt{PrimReplace}; it defines the
-same set, if $P(x,y)$ is single-valued.  The nice syntax for replacement
-expands to \texttt{Replace}.
-
-Other consequences of replacement include functional replacement
-(\cdx{RepFun}) and definite descriptions (\cdx{The}).
-Axioms for separation (\cdx{Collect}) and unordered pairs
-(\cdx{Upair}) are traditionally assumed, but they actually follow
-from replacement~\cite[pages 237--8]{suppes72}.
-
-The definitions of general intersection, etc., are straightforward.  Note
-the definition of \texttt{cons}, which underlies the finite set notation.
-The axiom of infinity gives us a set that contains~0 and is closed under
-successor (\cdx{succ}).  Although this set is not uniquely defined,
-the theory names it (\cdx{Inf}) in order to simplify the
-construction of the natural numbers.
-                                             
-Further definitions appear in Fig.\ts\ref{zf-defs}.  Ordered pairs are
-defined in the standard way, $\pair{a,b}\equiv\{\{a\},\{a,b\}\}$.  Recall
-that \cdx{Sigma}$(A,B)$ generalizes the Cartesian product of two
-sets.  It is defined to be the union of all singleton sets
-$\{\pair{x,y}\}$, for $x\in A$ and $y\in B(x)$.  This is a typical usage of
-general union.
-
-The projections \cdx{fst} and~\cdx{snd} are defined in terms of the
-generalized projection \cdx{split}.  The latter has been borrowed from
-Martin-L\"of's Type Theory, and is often easier to use than \cdx{fst}
-and~\cdx{snd}.
-
-Operations on relations include converse, domain, range, and image.  The
-set ${\tt Pi}(A,B)$ generalizes the space of functions between two sets.
-Note the simple definitions of $\lambda$-abstraction (using
-\cdx{RepFun}) and application (using a definite description).  The
-function \cdx{restrict}$(f,A)$ has the same values as~$f$, but only
-over the domain~$A$.
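-A tiny example may help fix these definitions: for the singleton function
-$\{\pair{a,b}\}$, the definition of application unfolds to a definite
-description whose unique solution is~$b$,
-\[ \{\pair{a,b}\}{\tt`}a \;=\; (\iota y. \pair{a,y}\in\{\pair{a,b}\}) \;=\; b. \]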
-
-
-%%%% zf.ML
-
-\begin{figure}
-\begin{ttbox}
-\tdx{ballI}       [| !!x. x:A ==> P(x) |] ==> ALL x:A. P(x)
-\tdx{bspec}       [| ALL x:A. P(x);  x: A |] ==> P(x)
-\tdx{ballE}       [| ALL x:A. P(x);  P(x) ==> Q;  ~ x:A ==> Q |] ==> Q
-
-\tdx{ball_cong}   [| A=A';  !!x. x:A' ==> P(x) <-> P'(x) |] ==> 
-            (ALL x:A. P(x)) <-> (ALL x:A'. P'(x))
-
-\tdx{bexI}        [| P(x);  x: A |] ==> EX x:A. P(x)
-\tdx{bexCI}       [| ALL x:A. ~P(x) ==> P(a);  a: A |] ==> EX x:A. P(x)
-\tdx{bexE}        [| EX x:A. P(x);  !!x. [| x:A; P(x) |] ==> Q |] ==> Q
-
-\tdx{bex_cong}    [| A=A';  !!x. x:A' ==> P(x) <-> P'(x) |] ==> 
-            (EX x:A. P(x)) <-> (EX x:A'. P'(x))
-\subcaption{Bounded quantifiers}
-
-\tdx{subsetI}       (!!x. x:A ==> x:B) ==> A <= B
-\tdx{subsetD}       [| A <= B;  c:A |] ==> c:B
-\tdx{subsetCE}      [| A <= B;  ~(c:A) ==> P;  c:B ==> P |] ==> P
-\tdx{subset_refl}   A <= A
-\tdx{subset_trans}  [| A<=B;  B<=C |] ==> A<=C
-
-\tdx{equalityI}     [| A <= B;  B <= A |] ==> A = B
-\tdx{equalityD1}    A = B ==> A<=B
-\tdx{equalityD2}    A = B ==> B<=A
-\tdx{equalityE}     [| A = B;  [| A<=B; B<=A |] ==> P |]  ==>  P
-\subcaption{Subsets and extensionality}
-
-\tdx{emptyE}          a:0 ==> P
-\tdx{empty_subsetI}   0 <= A
-\tdx{equals0I}        [| !!y. y:A ==> False |] ==> A=0
-\tdx{equals0D}        [| A=0;  a:A |] ==> P
-
-\tdx{PowI}            A <= B ==> A : Pow(B)
-\tdx{PowD}            A : Pow(B)  ==>  A<=B
-\subcaption{The empty set; power sets}
-\end{ttbox}
-\caption{Basic derived rules for {\ZF}} \label{zf-lemmas1}
-\end{figure}
-
-
-\section{From basic lemmas to function spaces}
-Faced with so many definitions, it is essential to prove lemmas.  Even
-trivial theorems like $A \int B = B \int A$ would be difficult to
-prove from the definitions alone.  Isabelle's set theory derives many
-rules using a natural deduction style.  Ideally, a natural deduction
-rule should introduce or eliminate just one operator, but this is not
-always practical.  For most operators, we can forget the definition
-and use the derived rules instead.
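-For instance, commutativity of intersection, one of those trivial facts,
-can be expected to fall to a single classical step once \tdx{equalityI} is
-supplied (this little goal is not among the distributed examples):
-\begin{ttbox}
-Goal "A Int B = B Int A";
-by (fast_tac (ZF_cs addIs [equalityI]) 1);
-\end{ttbox}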
-
-\subsection{Fundamental lemmas}
-Figure~\ref{zf-lemmas1} presents the derived rules for the most basic
-operators.  The rules for the bounded quantifiers resemble those for the
-ordinary quantifiers, but note that \tdx{ballE} uses a negated assumption
-in the style of Isabelle's classical reasoner.  The \rmindex{congruence
-  rules} \tdx{ball_cong} and \tdx{bex_cong} are required by Isabelle's
-simplifier, but have few other uses.  Congruence rules must be specially
-derived for all binding operators, and henceforth will not be shown.
-
-Figure~\ref{zf-lemmas1} also shows rules for the subset and equality
-relations (proof by extensionality), and rules about the empty set and the
-power set operator.
-
-Figure~\ref{zf-lemmas2} presents rules for replacement and separation.
-The rules for \cdx{Replace} and \cdx{RepFun} are much simpler than
-comparable rules for \texttt{PrimReplace} would be.  The principle of
-separation is proved explicitly, although most proofs should use the
-natural deduction rules for \texttt{Collect}.  The elimination rule
-\tdx{CollectE} is equivalent to the two destruction rules
-\tdx{CollectD1} and \tdx{CollectD2}, but each rule is suited to
-particular circumstances.  Although too many rules can be confusing, there
-is no reason to aim for a minimal set of rules.  See the file
-\texttt{ZF/ZF.ML} for a complete listing.
-
-Figure~\ref{zf-lemmas3} presents rules for general union and intersection.
-The empty intersection should be undefined.  We cannot have
-$\bigcap(\emptyset)=V$ because $V$, the universal class, is not a set.  All
-expressions denote something in {\ZF} set theory; the definition of
-intersection implies $\bigcap(\emptyset)=\emptyset$, but this value is
-arbitrary.  The rule \tdx{InterI} must have a premise to exclude
-the empty intersection.  Some of the laws governing intersections require
-similar premises.
-
-
-%the [p] gives better page breaking for the book
-\begin{figure}[p]
-\begin{ttbox}
-\tdx{ReplaceI}      [| x: A;  P(x,b);  !!y. P(x,y) ==> y=b |] ==> 
-              b : {\ttlbrace}y. x:A, P(x,y){\ttrbrace}
-
-\tdx{ReplaceE}      [| b : {\ttlbrace}y. x:A, P(x,y){\ttrbrace};  
-                 !!x. [| x: A;  P(x,b);  ALL y. P(x,y)-->y=b |] ==> R 
-              |] ==> R
-
-\tdx{RepFunI}       [| a : A |] ==> f(a) : {\ttlbrace}f(x). x:A{\ttrbrace}
-\tdx{RepFunE}       [| b : {\ttlbrace}f(x). x:A{\ttrbrace};  
-                 !!x.[| x:A;  b=f(x) |] ==> P |] ==> P
-
-\tdx{separation}     a : {\ttlbrace}x:A. P(x){\ttrbrace} <-> a:A & P(a)
-\tdx{CollectI}       [| a:A;  P(a) |] ==> a : {\ttlbrace}x:A. P(x){\ttrbrace}
-\tdx{CollectE}       [| a : {\ttlbrace}x:A. P(x){\ttrbrace};  [| a:A; P(a) |] ==> R |] ==> R
-\tdx{CollectD1}      a : {\ttlbrace}x:A. P(x){\ttrbrace} ==> a:A
-\tdx{CollectD2}      a : {\ttlbrace}x:A. P(x){\ttrbrace} ==> P(a)
-\end{ttbox}
-\caption{Replacement and separation} \label{zf-lemmas2}
-\end{figure}
-
-
-\begin{figure}
-\begin{ttbox}
-\tdx{UnionI}    [| B: C;  A: B |] ==> A: Union(C)
-\tdx{UnionE}    [| A : Union(C);  !!B.[| A: B;  B: C |] ==> R |] ==> R
-
-\tdx{InterI}    [| !!x. x: C ==> A: x;  c:C |] ==> A : Inter(C)
-\tdx{InterD}    [| A : Inter(C);  B : C |] ==> A : B
-\tdx{InterE}    [| A : Inter(C);  A:B ==> R;  ~ B:C ==> R |] ==> R
-
-\tdx{UN_I}      [| a: A;  b: B(a) |] ==> b: (UN x:A. B(x))
-\tdx{UN_E}      [| b : (UN x:A. B(x));  !!x.[| x: A;  b: B(x) |] ==> R 
-          |] ==> R
-
-\tdx{INT_I}     [| !!x. x: A ==> b: B(x);  a: A |] ==> b: (INT x:A. B(x))
-\tdx{INT_E}     [| b : (INT x:A. B(x));  a: A |] ==> b : B(a)
-\end{ttbox}
-\caption{General union and intersection} \label{zf-lemmas3}
-\end{figure}
-
-
-%%% upair.ML
-
-\begin{figure}
-\begin{ttbox}
-\tdx{pairing}      a:Upair(b,c) <-> (a=b | a=c)
-\tdx{UpairI1}      a : Upair(a,b)
-\tdx{UpairI2}      b : Upair(a,b)
-\tdx{UpairE}       [| a : Upair(b,c);  a = b ==> P;  a = c ==> P |] ==> P
-\end{ttbox}
-\caption{Unordered pairs} \label{zf-upair1}
-\end{figure}
-
-
-\begin{figure}
-\begin{ttbox}
-\tdx{UnI1}         c : A ==> c : A Un B
-\tdx{UnI2}         c : B ==> c : A Un B
-\tdx{UnCI}         (~c : B ==> c : A) ==> c : A Un B
-\tdx{UnE}          [| c : A Un B;  c:A ==> P;  c:B ==> P |] ==> P
-
-\tdx{IntI}         [| c : A;  c : B |] ==> c : A Int B
-\tdx{IntD1}        c : A Int B ==> c : A
-\tdx{IntD2}        c : A Int B ==> c : B
-\tdx{IntE}         [| c : A Int B;  [| c:A; c:B |] ==> P |] ==> P
-
-\tdx{DiffI}        [| c : A;  ~ c : B |] ==> c : A - B
-\tdx{DiffD1}       c : A - B ==> c : A
-\tdx{DiffD2}       c : A - B ==> c ~: B
-\tdx{DiffE}        [| c : A - B;  [| c:A; ~ c:B |] ==> P |] ==> P
-\end{ttbox}
-\caption{Union, intersection, difference} \label{zf-Un}
-\end{figure}
-
-
-\begin{figure}
-\begin{ttbox}
-\tdx{consI1}       a : cons(a,B)
-\tdx{consI2}       a : B ==> a : cons(b,B)
-\tdx{consCI}       (~ a:B ==> a=b) ==> a: cons(b,B)
-\tdx{consE}        [| a : cons(b,A);  a=b ==> P;  a:A ==> P |] ==> P
-
-\tdx{singletonI}   a : {\ttlbrace}a{\ttrbrace}
-\tdx{singletonE}   [| a : {\ttlbrace}b{\ttrbrace}; a=b ==> P |] ==> P
-\end{ttbox}
-\caption{Finite and singleton sets} \label{zf-upair2}
-\end{figure}
-
-
-\begin{figure}
-\begin{ttbox}
-\tdx{succI1}       i : succ(i)
-\tdx{succI2}       i : j ==> i : succ(j)
-\tdx{succCI}       (~ i:j ==> i=j) ==> i: succ(j)
-\tdx{succE}        [| i : succ(j);  i=j ==> P;  i:j ==> P |] ==> P
-\tdx{succ_neq_0}   [| succ(n)=0 |] ==> P
-\tdx{succ_inject}  succ(m) = succ(n) ==> m=n
-\end{ttbox}
-\caption{The successor function} \label{zf-succ}
-\end{figure}
-
-
-\begin{figure}
-\begin{ttbox}
-\tdx{the_equality}     [| P(a);  !!x. P(x) ==> x=a |] ==> (THE x. P(x)) = a
-\tdx{theI}             EX! x. P(x) ==> P(THE x. P(x))
-
-\tdx{if_P}              P ==> if(P,a,b) = a
-\tdx{if_not_P}         ~P ==> if(P,a,b) = b
-
-\tdx{mem_asym}         [| a:b;  b:a |] ==> P
-\tdx{mem_irrefl}       a:a ==> P
-\end{ttbox}
-\caption{Descriptions; non-circularity} \label{zf-the}
-\end{figure}
-
-
-\subsection{Unordered pairs and finite sets}
-Figure~\ref{zf-upair1} presents the principle of unordered pairing, along
-with its derived rules.  Binary union and intersection are defined in terms
-of unordered pairs (Fig.\ts\ref{zf-Un}).  Set difference is also included.  The
-rule \tdx{UnCI} is useful for classical reasoning about unions,
-like \texttt{disjCI}\@; it supersedes \tdx{UnI1} and
-\tdx{UnI2}, but these rules are often easier to work with.  For
-intersection and difference we have both elimination and destruction rules.
-Again, there is no reason to provide a minimal rule set.
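-As a small illustration (not one of the distributed examples), these rules
-prove $A\int B \subseteq A\un B$ by a routine chain of resolutions:
-\begin{ttbox}
-Goal "A Int B <= A Un B";
-by (resolve_tac [subsetI] 1);
-by (eresolve_tac [IntE] 1);
-by (resolve_tac [UnI1] 1);
-by (assume_tac 1);
-\end{ttbox}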
-
-Figure~\ref{zf-upair2} is concerned with finite sets: it presents rules
-for~\texttt{cons}, the finite set constructor, and rules for singleton
-sets.  Figure~\ref{zf-succ} presents derived rules for the successor
-function, which is defined in terms of~\texttt{cons}.  The proof that {\tt
-  succ} is injective appears to require the Axiom of Foundation.
-
-Definite descriptions (\sdx{THE}) are defined in terms of the singleton
-set~$\{0\}$, but their derived rules fortunately hide this
-(Fig.\ts\ref{zf-the}).  The rule~\tdx{theI} is difficult to apply
-because of the two occurrences of~$\Var{P}$.  However,
-\tdx{the_equality} does not have this problem and the files contain
-many examples of its use.
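-For instance, taking $P[x]$ to be $x=a$ makes both premises of
-\tdx{the_equality} trivial, and we obtain
-\[ (\iota x. x=a) \;=\; a. \]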
-
-Finally, the impossibility of having both $a\in b$ and $b\in a$
-(\tdx{mem_asym}) is proved by applying the Axiom of Foundation to
-the set $\{a,b\}$.  The impossibility of $a\in a$ is a trivial consequence.
-
-See the file \texttt{ZF/upair.ML} for full proofs of the rules discussed in
-this section.
-
-
-%%% subset.ML
-
-\begin{figure}
-\begin{ttbox}
-\tdx{Union_upper}       B:A ==> B <= Union(A)
-\tdx{Union_least}       [| !!x. x:A ==> x<=C |] ==> Union(A) <= C
-
-\tdx{Inter_lower}       B:A ==> Inter(A) <= B
-\tdx{Inter_greatest}    [| a:A;  !!x. x:A ==> C<=x |] ==> C <= Inter(A)
-
-\tdx{Un_upper1}         A <= A Un B
-\tdx{Un_upper2}         B <= A Un B
-\tdx{Un_least}          [| A<=C;  B<=C |] ==> A Un B <= C
-
-\tdx{Int_lower1}        A Int B <= A
-\tdx{Int_lower2}        A Int B <= B
-\tdx{Int_greatest}      [| C<=A;  C<=B |] ==> C <= A Int B
-
-\tdx{Diff_subset}       A-B <= A
-\tdx{Diff_contains}     [| C<=A;  C Int B = 0 |] ==> C <= A-B
-
-\tdx{Collect_subset}    Collect(A,P) <= A
-\end{ttbox}
-\caption{Subset and lattice properties} \label{zf-subset}
-\end{figure}
-
-
-\subsection{Subset and lattice properties}
-The subset relation is a complete lattice.  Unions form least upper bounds;
-non-empty intersections form greatest lower bounds.  Figure~\ref{zf-subset}
-shows the corresponding rules.  A few other laws involving subsets are
-included.  Proofs are in the file \texttt{ZF/subset.ML}.
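-A toy goal (not among the distributed proofs) shows the least-upper-bound
-rules at work, proving an instance of the monotonicity of~$\bigcup$:
-\begin{ttbox}
-Goal "Union(A) <= Union(A Un B)";
-by (resolve_tac [Union_least] 1);
-by (resolve_tac [Union_upper] 1);
-by (resolve_tac [UnI1] 1);
-by (assume_tac 1);
-\end{ttbox}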
-
-Reasoning directly about subsets often yields clearer proofs than
-reasoning about the membership relation.  Section~\ref{sec:ZF-pow-example}
-below presents an example of this, proving the equation ${{\tt Pow}(A)\cap
-  {\tt Pow}(B)}= {\tt Pow}(A\cap B)$.
-
-%%% pair.ML
-
-\begin{figure}
-\begin{ttbox}
-\tdx{Pair_inject1}    <a,b> = <c,d> ==> a=c
-\tdx{Pair_inject2}    <a,b> = <c,d> ==> b=d
-\tdx{Pair_inject}     [| <a,b> = <c,d>;  [| a=c; b=d |] ==> P |] ==> P
-\tdx{Pair_neq_0}      <a,b>=0 ==> P
-
-\tdx{fst_conv}        fst(<a,b>) = a
-\tdx{snd_conv}        snd(<a,b>) = b
-\tdx{split}           split(\%x y. c(x,y), <a,b>) = c(a,b)
-
-\tdx{SigmaI}          [| a:A;  b:B(a) |] ==> <a,b> : Sigma(A,B)
-
-\tdx{SigmaE}          [| c: Sigma(A,B);  
-                   !!x y.[| x:A; y:B(x); c=<x,y> |] ==> P |] ==> P
-
-\tdx{SigmaE2}         [| <a,b> : Sigma(A,B);    
-                   [| a:A;  b:B(a) |] ==> P   |] ==> P
-\end{ttbox}
-\caption{Ordered pairs; projections; general sums} \label{zf-pair}
-\end{figure}
-
-
-\subsection{Ordered pairs}
-Figure~\ref{zf-pair} presents the rules governing ordered pairs,
-projections and general sums.  File \texttt{ZF/pair.ML} contains the
-full (and tedious) proof that $\{\{a\},\{a,b\}\}$ functions as an ordered
-pair.  This property is expressed as two destruction rules,
-\tdx{Pair_inject1} and \tdx{Pair_inject2}, and equivalently
-as the elimination rule \tdx{Pair_inject}.
-
-The rule \tdx{Pair_neq_0} asserts $\pair{a,b}\neq\emptyset$.  This
-is a property of $\{\{a\},\{a,b\}\}$, and need not hold for other 
-encodings of ordered pairs.  The non-standard ordered pairs mentioned below
-satisfy $\pair{\emptyset;\emptyset}=\emptyset$.
-
-The natural deduction rules \tdx{SigmaI} and \tdx{SigmaE}
-assert that \cdx{Sigma}$(A,B)$ consists of all pairs of the form
-$\pair{x,y}$, for $x\in A$ and $y\in B(x)$.  The rule \tdx{SigmaE2}
-merely states that $\pair{a,b}\in \texttt{Sigma}(A,B)$ implies $a\in A$ and
-$b\in B(a)$.
-
-In addition, it is possible to use tuples as patterns in abstractions:
-\begin{center}
-{\tt\%<$x$,$y$>. $t$} \quad stands for\quad \texttt{split(\%$x$ $y$. $t$)}
-\end{center}
-Nested patterns are translated recursively:
-{\tt\%<$x$,$y$,$z$>. $t$} $\leadsto$ {\tt\%<$x$,<$y$,$z$>>. $t$} $\leadsto$
-\texttt{split(\%$x$.\%<$y$,$z$>. $t$)} $\leadsto$ \texttt{split(\%$x$. split(\%$y$
-  $z$. $t$))}.  The reverse translation is performed upon printing.
-\begin{warn}
-  The translation between patterns and \texttt{split} is performed automatically
-  by the parser and printer.  Thus the internal and external form of a term
-  may differ, which affects proofs.  For example the term {\tt
-    (\%<x,y>.<y,x>)<a,b>} requires the theorem \texttt{split} to rewrite to
-  {\tt<b,a>}.
-\end{warn}
-In addition to explicit $\lambda$-abstractions, patterns can be used in any
-variable binding construct which is internally described by a
-$\lambda$-abstraction.  Some important examples are
-\begin{description}
-\item[Let:] \texttt{let {\it pattern} = $t$ in $u$}
-\item[Choice:] \texttt{THE~{\it pattern}~.~$P$}
-\item[Set operations:] \texttt{UN~{\it pattern}:$A$.~$B$}
-\item[Comprehension:] \texttt{{\ttlbrace}~{\it pattern}:$A$~.~$P$~{\ttrbrace}}
-\end{description}
-
-
-%%% domrange.ML
-
-\begin{figure}
-\begin{ttbox}
-\tdx{domainI}        <a,b>: r ==> a : domain(r)
-\tdx{domainE}        [| a : domain(r);  !!y. <a,y>: r ==> P |] ==> P
-\tdx{domain_subset}  domain(Sigma(A,B)) <= A
-
-\tdx{rangeI}         <a,b>: r ==> b : range(r)
-\tdx{rangeE}         [| b : range(r);  !!x. <x,b>: r ==> P |] ==> P
-\tdx{range_subset}   range(A*B) <= B
-
-\tdx{fieldI1}        <a,b>: r ==> a : field(r)
-\tdx{fieldI2}        <a,b>: r ==> b : field(r)
-\tdx{fieldCI}        (~ <c,a>:r ==> <a,b>: r) ==> a : field(r)
-
-\tdx{fieldE}         [| a : field(r);  
-                  !!x. <a,x>: r ==> P;  
-                  !!x. <x,a>: r ==> P      
-               |] ==> P
-
-\tdx{field_subset}   field(A*A) <= A
-\end{ttbox}
-\caption{Domain, range and field of a relation} \label{zf-domrange}
-\end{figure}
-
-\begin{figure}
-\begin{ttbox}
-\tdx{imageI}         [| <a,b>: r;  a:A |] ==> b : r``A
-\tdx{imageE}         [| b: r``A;  !!x.[| <x,b>: r;  x:A |] ==> P |] ==> P
-
-\tdx{vimageI}        [| <a,b>: r;  b:B |] ==> a : r-``B
-\tdx{vimageE}        [| a: r-``B;  !!x.[| <a,x>: r;  x:B |] ==> P |] ==> P
-\end{ttbox}
-\caption{Image and inverse image} \label{zf-domrange2}
-\end{figure}
-
-
-\subsection{Relations}
-Figure~\ref{zf-domrange} presents rules involving relations, which are sets
-of ordered pairs.  The converse of a relation~$r$ is the set of all pairs
-$\pair{y,x}$ such that $\pair{x,y}\in r$; if $r$ is a function, then
-{\cdx{converse}$(r)$} is its inverse.  The rules for the domain
-operation, namely \tdx{domainI} and~\tdx{domainE}, assert that
-\cdx{domain}$(r)$ consists of all~$x$ such that $r$ contains
-some pair of the form~$\pair{x,y}$.  The range operation is similar, and
-the field of a relation is merely the union of its domain and range.  
-
-Figure~\ref{zf-domrange2} presents rules for images and inverse images.
-Note that these operations are generalisations of range and domain,
-respectively.  See the file \texttt{ZF/domrange.ML} for derivations of the
-rules.
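-
-For instance, if $r=\{\pair{a,b},\pair{a,c}\}$ then, as a small worked example
-(not itself one of the distributed theorems),
-\[ {\tt domain}(r)=\{a\}, \qquad {\tt range}(r)=\{b,c\}, \qquad
-   {\tt field}(r)=\{a,b,c\}, \qquad {\tt converse}(r)=\{\pair{b,a},\pair{c,a}\}. \]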
-
-
-%%% func.ML
-
-\begin{figure}
-\begin{ttbox}
-\tdx{fun_is_rel}      f: Pi(A,B) ==> f <= Sigma(A,B)
-
-\tdx{apply_equality}  [| <a,b>: f;  f: Pi(A,B) |] ==> f`a = b
-\tdx{apply_equality2} [| <a,b>: f;  <a,c>: f;  f: Pi(A,B) |] ==> b=c
-
-\tdx{apply_type}      [| f: Pi(A,B);  a:A |] ==> f`a : B(a)
-\tdx{apply_Pair}      [| f: Pi(A,B);  a:A |] ==> <a,f`a>: f
-\tdx{apply_iff}       f: Pi(A,B) ==> <a,b>: f <-> a:A & f`a = b
-
-\tdx{fun_extension}   [| f : Pi(A,B);  g: Pi(A,D);
-                   !!x. x:A ==> f`x = g`x     |] ==> f=g
-
-\tdx{domain_type}     [| <a,b> : f;  f: Pi(A,B) |] ==> a : A
-\tdx{range_type}      [| <a,b> : f;  f: Pi(A,B) |] ==> b : B(a)
-
-\tdx{Pi_type}         [| f: A->C;  !!x. x:A ==> f`x: B(x) |] ==> f: Pi(A,B)
-\tdx{domain_of_fun}   f: Pi(A,B) ==> domain(f)=A
-\tdx{range_of_fun}    f: Pi(A,B) ==> f: A->range(f)
-
-\tdx{restrict}        a : A ==> restrict(f,A) ` a = f`a
-\tdx{restrict_type}   [| !!x. x:A ==> f`x: B(x) |] ==> 
-                restrict(f,A) : Pi(A,B)
-\end{ttbox}
-\caption{Functions} \label{zf-func1}
-\end{figure}
-
-
-\begin{figure}
-\begin{ttbox}
-\tdx{lamI}         a:A ==> <a,b(a)> : (lam x:A. b(x))
-\tdx{lamE}         [| p: (lam x:A. b(x));  !!x.[| x:A; p=<x,b(x)> |] ==> P 
-             |] ==>  P
-
-\tdx{lam_type}     [| !!x. x:A ==> b(x): B(x) |] ==> (lam x:A. b(x)) : Pi(A,B)
-
-\tdx{beta}         a : A ==> (lam x:A. b(x)) ` a = b(a)
-\tdx{eta}          f : Pi(A,B) ==> (lam x:A. f`x) = f
-\end{ttbox}
-\caption{$\lambda$-abstraction} \label{zf-lam}
-\end{figure}
-
-
-\begin{figure}
-\begin{ttbox}
-\tdx{fun_empty}            0: 0->0
-\tdx{fun_single}           {\ttlbrace}<a,b>{\ttrbrace} : {\ttlbrace}a{\ttrbrace} -> {\ttlbrace}b{\ttrbrace}
-
-\tdx{fun_disjoint_Un}      [| f: A->B;  g: C->D;  A Int C = 0  |] ==>  
-                     (f Un g) : (A Un C) -> (B Un D)
-
-\tdx{fun_disjoint_apply1}  [| a:A;  f: A->B;  g: C->D;  A Int C = 0 |] ==>  
-                     (f Un g)`a = f`a
-
-\tdx{fun_disjoint_apply2}  [| c:C;  f: A->B;  g: C->D;  A Int C = 0 |] ==>  
-                     (f Un g)`c = g`c
-\end{ttbox}
-\caption{Constructing functions from smaller sets} \label{zf-func2}
-\end{figure}
-
-
-\subsection{Functions}
-Functions, represented by graphs, are notoriously difficult to reason
-about.  The file \texttt{ZF/func.ML} derives many rules, which overlap more
-than they ought.  This section presents the more important rules.
-
-Figure~\ref{zf-func1} presents the basic properties of \cdx{Pi}$(A,B)$,
-the generalized function space.  For example, if $f$ is a function and
-$\pair{a,b}\in f$, then $f`a=b$ (\tdx{apply_equality}).  Two functions
-are equal provided they have equal domains and deliver equal results
-(\tdx{fun_extension}).
-
-By \tdx{Pi_type}, a function typing of the form $f\in A\to C$ can be
-refined to the dependent typing $f\in\prod@{x\in A}B(x)$, given a suitable
-family of sets $\{B(x)\}@{x\in A}$.  Conversely, by \tdx{range_of_fun},
-any dependent typing can be flattened to yield a function type of the form
-$A\to C$; here, $C={\tt range}(f)$.
-
-Among the laws for $\lambda$-abstraction, \tdx{lamI} and \tdx{lamE}
-describe the graph of the generated function, while \tdx{beta} and
-\tdx{eta} are the standard conversions.  We essentially have a
-dependently-typed $\lambda$-calculus (Fig.\ts\ref{zf-lam}).
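-
-For instance, for any $a\in A$, the rule \tdx{beta} immediately yields the
-following instance (an illustrative example rather than a separately named
-theorem):
-\begin{ttbox}
-a : A ==> (lam x:A. <x,x>) ` a = <a,a>
-\end{ttbox}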
-
-Figure~\ref{zf-func2} presents some rules that can be used to construct
-functions explicitly.  We start with functions consisting of at most one
-pair, and may form the union of two functions provided their domains are
-disjoint.  
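-
-For instance, provided $a\neq c$ (so that $\{a\}\cap\{c\}=\emptyset$), two uses
-of \tdx{fun_single} together with \tdx{fun_disjoint_Un} justify the
-illustrative instance
-\[ \{\pair{a,b},\pair{c,d}\} \in \{a,c\}\to\{b,d\}. \]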
-
-
-\begin{figure}
-\begin{ttbox}
-\tdx{Int_absorb}         A Int A = A
-\tdx{Int_commute}        A Int B = B Int A
-\tdx{Int_assoc}          (A Int B) Int C  =  A Int (B Int C)
-\tdx{Int_Un_distrib}     (A Un B) Int C  =  (A Int C) Un (B Int C)
-
-\tdx{Un_absorb}          A Un A = A
-\tdx{Un_commute}         A Un B = B Un A
-\tdx{Un_assoc}           (A Un B) Un C  =  A Un (B Un C)
-\tdx{Un_Int_distrib}     (A Int B) Un C  =  (A Un C) Int (B Un C)
-
-\tdx{Diff_cancel}        A-A = 0
-\tdx{Diff_disjoint}      A Int (B-A) = 0
-\tdx{Diff_partition}     A<=B ==> A Un (B-A) = B
-\tdx{double_complement}  [| A<=B; B<= C |] ==> (B - (C-A)) = A
-\tdx{Diff_Un}            A - (B Un C) = (A-B) Int (A-C)
-\tdx{Diff_Int}           A - (B Int C) = (A-B) Un (A-C)
-
-\tdx{Union_Un_distrib}   Union(A Un B) = Union(A) Un Union(B)
-\tdx{Inter_Un_distrib}   [| a:A;  b:B |] ==> 
-                   Inter(A Un B) = Inter(A) Int Inter(B)
-
-\tdx{Int_Union_RepFun}   A Int Union(B) = (UN C:B. A Int C)
-
-\tdx{Un_Inter_RepFun}    b:B ==> 
-                   A Un Inter(B) = (INT C:B. A Un C)
-
-\tdx{SUM_Un_distrib1}    (SUM x:A Un B. C(x)) = 
-                   (SUM x:A. C(x)) Un (SUM x:B. C(x))
-
-\tdx{SUM_Un_distrib2}    (SUM x:C. A(x) Un B(x)) =
-                   (SUM x:C. A(x))  Un  (SUM x:C. B(x))
-
-\tdx{SUM_Int_distrib1}   (SUM x:A Int B. C(x)) =
-                   (SUM x:A. C(x)) Int (SUM x:B. C(x))
-
-\tdx{SUM_Int_distrib2}   (SUM x:C. A(x) Int B(x)) =
-                   (SUM x:C. A(x)) Int (SUM x:C. B(x))
-\end{ttbox}
-\caption{Equalities} \label{zf-equalities}
-\end{figure}
-
-
-\begin{figure}
-%\begin{constants} 
-%  \cdx{1}       & $i$           &       & $\{\emptyset\}$       \\
-%  \cdx{bool}    & $i$           &       & the set $\{\emptyset,1\}$     \\
-%  \cdx{cond}   & $[i,i,i]\To i$ &       & conditional for \texttt{bool}    \\
-%  \cdx{not}    & $i\To i$       &       & negation for \texttt{bool}       \\
-%  \sdx{and}    & $[i,i]\To i$   & Left 70 & conjunction for \texttt{bool}  \\
-%  \sdx{or}     & $[i,i]\To i$   & Left 65 & disjunction for \texttt{bool}  \\
-%  \sdx{xor}    & $[i,i]\To i$   & Left 65 & exclusive-or for \texttt{bool}
-%\end{constants}
-%
-\begin{ttbox}
-\tdx{bool_def}       bool == {\ttlbrace}0,1{\ttrbrace}
-\tdx{cond_def}       cond(b,c,d) == if(b=1,c,d)
-\tdx{not_def}        not(b)  == cond(b,0,1)
-\tdx{and_def}        a and b == cond(a,b,0)
-\tdx{or_def}         a or b  == cond(a,1,b)
-\tdx{xor_def}        a xor b == cond(a,not(b),b)
-
-\tdx{bool_1I}        1 : bool
-\tdx{bool_0I}        0 : bool
-\tdx{boolE}          [| c: bool;  c=1 ==> P;  c=0 ==> P |] ==> P
-\tdx{cond_1}         cond(1,c,d) = c
-\tdx{cond_0}         cond(0,c,d) = d
-\end{ttbox}
-\caption{The booleans} \label{zf-bool}
-\end{figure}
-
-
-\section{Further developments}
-The next group of developments is complex and extensive, and only
-highlights can be covered here.  It involves many theories and ML files of
-proofs. 
-
-Figure~\ref{zf-equalities} presents commutative, associative, distributive,
-and idempotency laws of union and intersection, along with other equations.
-See file \texttt{ZF/equalities.ML}.
-
-Theory \thydx{Bool} defines $\{0,1\}$ as a set of booleans, with the usual
-operators including a conditional (Fig.\ts\ref{zf-bool}).  Although {\ZF} is a
-first-order theory, you can obtain the effect of higher-order logic using
-\texttt{bool}-valued functions, for example.  The constant~\texttt{1} is
-translated to \texttt{succ(0)}.
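-
-As an illustrative calculation (not itself a distributed theorem), unfolding
-\tdx{xor_def} and \tdx{not_def} and rewriting twice with \tdx{cond_1} gives
-\begin{ttbox}
-1 xor 1  =  cond(1, not(1), 1)  =  not(1)  =  cond(1,0,1)  =  0
-\end{ttbox}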
-
-\begin{figure}
-\index{*"+ symbol}
-\begin{constants}
-  \it symbol    & \it meta-type & \it priority & \it description \\ 
-  \tt +         & $[i,i]\To i$  &  Right 65     & disjoint union operator\\
-  \cdx{Inl}~~\cdx{Inr}  & $i\To i$      &       & injections\\
-  \cdx{case}    & $[i\To i,i\To i, i]\To i$ &   & conditional for $A+B$
-\end{constants}
-\begin{ttbox}
-\tdx{sum_def}        A+B == {\ttlbrace}0{\ttrbrace}*A Un {\ttlbrace}1{\ttrbrace}*B
-\tdx{Inl_def}        Inl(a) == <0,a>
-\tdx{Inr_def}        Inr(b) == <1,b>
-\tdx{case_def}       case(c,d,u) == split(\%y z. cond(y, d(z), c(z)), u)
-
-\tdx{sum_InlI}       a : A ==> Inl(a) : A+B
-\tdx{sum_InrI}       b : B ==> Inr(b) : A+B
-
-\tdx{Inl_inject}     Inl(a)=Inl(b) ==> a=b
-\tdx{Inr_inject}     Inr(a)=Inr(b) ==> a=b
-\tdx{Inl_neq_Inr}    Inl(a)=Inr(b) ==> P
-
-\tdx{sumE2}   u: A+B ==> (EX x. x:A & u=Inl(x)) | (EX y. y:B & u=Inr(y))
-
-\tdx{case_Inl}       case(c,d,Inl(a)) = c(a)
-\tdx{case_Inr}       case(c,d,Inr(b)) = d(b)
-\end{ttbox}
-\caption{Disjoint unions} \label{zf-sum}
-\end{figure}
-
-
-Theory \thydx{Sum} defines the disjoint union of two sets, with
-injections and a case analysis operator (Fig.\ts\ref{zf-sum}).  Disjoint
-unions play a role in datatype definitions, particularly when there is
-mutual recursion~\cite{paulson-set-II}.
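-
-The conversion \tdx{case_Inl}, for example, can be checked by unfolding
-\tdx{case_def} and \tdx{Inl_def} and then rewriting with \texttt{split} and
-\tdx{cond_0}; the following sketch of that derivation is merely illustrative:
-\begin{ttbox}
-case(c,d,Inl(a)) = split(\%y z. cond(y, d(z), c(z)), <0,a>)
-                 = cond(0, d(a), c(a))
-                 = c(a)
-\end{ttbox}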
-
-\begin{figure}
-\begin{ttbox}
-\tdx{QPair_def}       <a;b> == a+b
-\tdx{qsplit_def}      qsplit(c,p)  == THE y. EX a b. p=<a;b> & y=c(a,b)
-\tdx{qfsplit_def}     qfsplit(R,z) == EX x y. z=<x;y> & R(x,y)
-\tdx{qconverse_def}   qconverse(r) == {\ttlbrace}z. w:r, EX x y. w=<x;y> & z=<y;x>{\ttrbrace}
-\tdx{QSigma_def}      QSigma(A,B)  == UN x:A. UN y:B(x). {\ttlbrace}<x;y>{\ttrbrace}
-
-\tdx{qsum_def}        A <+> B      == ({\ttlbrace}0{\ttrbrace} <*> A) Un ({\ttlbrace}1{\ttrbrace} <*> B)
-\tdx{QInl_def}        QInl(a)      == <0;a>
-\tdx{QInr_def}        QInr(b)      == <1;b>
-\tdx{qcase_def}       qcase(c,d)   == qsplit(\%y z. cond(y, d(z), c(z)))
-\end{ttbox}
-\caption{Non-standard pairs, products and sums} \label{zf-qpair}
-\end{figure}
-
-Theory \thydx{QPair} defines a notion of ordered pair that admits
-non-well-founded tupling (Fig.\ts\ref{zf-qpair}).  Such pairs are written
-{\tt<$a$;$b$>}.  It also defines the eliminator \cdx{qsplit}, the
-converse operator \cdx{qconverse}, and the summation operator
-\cdx{QSigma}.  These are completely analogous to the corresponding
-versions for standard ordered pairs.  The theory goes on to define a
-non-standard notion of disjoint sum using non-standard pairs.  All of these
-concepts satisfy the same properties as their standard counterparts; in
-addition, {\tt<$a$;$b$>} is continuous.  The theory supports coinductive
-definitions, for example of infinite lists~\cite{paulson-final}.
-
-\begin{figure}
-\begin{ttbox}
-\tdx{bnd_mono_def}   bnd_mono(D,h) == 
-                 h(D)<=D & (ALL W X. W<=X --> X<=D --> h(W) <= h(X))
-
-\tdx{lfp_def}        lfp(D,h) == Inter({\ttlbrace}X: Pow(D). h(X) <= X{\ttrbrace})
-\tdx{gfp_def}        gfp(D,h) == Union({\ttlbrace}X: Pow(D). X <= h(X){\ttrbrace})
-
-
-\tdx{lfp_lowerbound} [| h(A) <= A;  A<=D |] ==> lfp(D,h) <= A
-
-\tdx{lfp_subset}     lfp(D,h) <= D
-
-\tdx{lfp_greatest}   [| bnd_mono(D,h);  
-                  !!X. [| h(X) <= X;  X<=D |] ==> A<=X 
-               |] ==> A <= lfp(D,h)
-
-\tdx{lfp_Tarski}     bnd_mono(D,h) ==> lfp(D,h) = h(lfp(D,h))
-
-\tdx{induct}         [| a : lfp(D,h);  bnd_mono(D,h);
-                  !!x. x : h(Collect(lfp(D,h),P)) ==> P(x)
-               |] ==> P(a)
-
-\tdx{lfp_mono}       [| bnd_mono(D,h);  bnd_mono(E,i);
-                  !!X. X<=D ==> h(X) <= i(X)  
-               |] ==> lfp(D,h) <= lfp(E,i)
-
-\tdx{gfp_upperbound} [| A <= h(A);  A<=D |] ==> A <= gfp(D,h)
-
-\tdx{gfp_subset}     gfp(D,h) <= D
-
-\tdx{gfp_least}      [| bnd_mono(D,h);  
-                  !!X. [| X <= h(X);  X<=D |] ==> X<=A
-               |] ==> gfp(D,h) <= A
-
-\tdx{gfp_Tarski}     bnd_mono(D,h) ==> gfp(D,h) = h(gfp(D,h))
-
-\tdx{coinduct}       [| bnd_mono(D,h); a: X; X <= h(X Un gfp(D,h)); X <= D 
-               |] ==> a : gfp(D,h)
-
-\tdx{gfp_mono}       [| bnd_mono(D,h);  D <= E;
-                  !!X. X<=D ==> h(X) <= i(X)  
-               |] ==> gfp(D,h) <= gfp(E,i)
-\end{ttbox}
-\caption{Least and greatest fixedpoints} \label{zf-fixedpt}
-\end{figure}
-
-The Knaster-Tarski Theorem states that every monotone function over a
-complete lattice has a fixedpoint.  Theory \thydx{Fixedpt} proves the
-Theorem only for a particular lattice, namely the lattice of subsets of a
-set (Fig.\ts\ref{zf-fixedpt}).  The theory defines least and greatest
-fixedpoint operators with corresponding induction and coinduction rules.
-These are essential to many definitions that follow, including the natural
-numbers and the transitive closure operator.  The (co)inductive definition
-package also uses the fixedpoint operators~\cite{paulson-CADE}.  See
-Davey and Priestley~\cite{davey&priestley} for more on the Knaster-Tarski
-Theorem and my paper~\cite{paulson-set-II} for discussion of the Isabelle
-proofs.
-
-Monotonicity properties are proved for most of the set-forming operations:
-union, intersection, Cartesian product, image, domain, range, etc.  These
-are useful for applying the Knaster-Tarski Fixedpoint Theorem.  The proofs
-themselves are trivial applications of Isabelle's classical reasoner.  See
-file \texttt{ZF/mono.ML}.
-
-
-\begin{figure}
-\begin{constants} 
-  \it symbol  & \it meta-type & \it priority & \it description \\ 
-  \sdx{O}       & $[i,i]\To i$  &  Right 60     & composition ($\circ$) \\
-  \cdx{id}      & $i\To i$      &       & identity function \\
-  \cdx{inj}     & $[i,i]\To i$  &       & injective function space\\
-  \cdx{surj}    & $[i,i]\To i$  &       & surjective function space\\
-  \cdx{bij}     & $[i,i]\To i$  &       & bijective function space
-\end{constants}
-
-\begin{ttbox}
-\tdx{comp_def}  r O s     == {\ttlbrace}xz : domain(s)*range(r) . 
-                        EX x y z. xz=<x,z> & <x,y>:s & <y,z>:r{\ttrbrace}
-\tdx{id_def}    id(A)     == (lam x:A. x)
-\tdx{inj_def}   inj(A,B)  == {\ttlbrace} f: A->B. ALL w:A. ALL x:A. f`w=f`x --> w=x {\ttrbrace}
-\tdx{surj_def}  surj(A,B) == {\ttlbrace} f: A->B . ALL y:B. EX x:A. f`x=y {\ttrbrace}
-\tdx{bij_def}   bij(A,B)  == inj(A,B) Int surj(A,B)
-
-
-\tdx{left_inverse}     [| f: inj(A,B);  a: A |] ==> converse(f)`(f`a) = a
-\tdx{right_inverse}    [| f: inj(A,B);  b: range(f) |] ==> 
-                 f`(converse(f)`b) = b
-
-\tdx{inj_converse_inj} f: inj(A,B) ==> converse(f): inj(range(f), A)
-\tdx{bij_converse_bij} f: bij(A,B) ==> converse(f): bij(B,A)
-
-\tdx{comp_type}        [| s<=A*B;  r<=B*C |] ==> (r O s) <= A*C
-\tdx{comp_assoc}       (r O s) O t = r O (s O t)
-
-\tdx{left_comp_id}     r<=A*B ==> id(B) O r = r
-\tdx{right_comp_id}    r<=A*B ==> r O id(A) = r
-
-\tdx{comp_func}        [| g:A->B; f:B->C |] ==> (f O g):A->C
-\tdx{comp_func_apply}  [| g:A->B; f:B->C; a:A |] ==> (f O g)`a = f`(g`a)
-
-\tdx{comp_inj}         [| g:inj(A,B);  f:inj(B,C)  |] ==> (f O g):inj(A,C)
-\tdx{comp_surj}        [| g:surj(A,B); f:surj(B,C) |] ==> (f O g):surj(A,C)
-\tdx{comp_bij}         [| g:bij(A,B); f:bij(B,C) |] ==> (f O g):bij(A,C)
-
-\tdx{left_comp_inverse}     f: inj(A,B) ==> converse(f) O f = id(A)
-\tdx{right_comp_inverse}    f: surj(A,B) ==> f O converse(f) = id(B)
-
-\tdx{bij_disjoint_Un}   
-    [| f: bij(A,B);  g: bij(C,D);  A Int C = 0;  B Int D = 0 |] ==> 
-    (f Un g) : bij(A Un C, B Un D)
-
-\tdx{restrict_bij}  [| f:inj(A,B);  C<=A |] ==> restrict(f,C): bij(C, f``C)
-\end{ttbox}
-\caption{Permutations} \label{zf-perm}
-\end{figure}
-
-The theory \thydx{Perm} is concerned with permutations (bijections) and
-related concepts.  These include composition of relations, the identity
-relation, and three specialized function spaces: injective, surjective and
-bijective.  Figure~\ref{zf-perm} displays many of their properties that
-have been proved.  These results are fundamental to a treatment of
-equipollence and cardinality.
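-
-As a small example of how these rules chain together (an illustrative
-derivation, not an entry in the figure): from $f\in{\tt bij}(A,B)$, the
-definition \tdx{bij_def} gives $f\in{\tt surj}(A,B)$, and
-\tdx{right_comp_inverse} then yields $f\circ {\tt converse}(f) = {\tt id}(B)$.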
-
-\begin{figure}\small
-\index{#*@{\tt\#*} symbol}
-\index{*div symbol}
-\index{*mod symbol}
-\index{#+@{\tt\#+} symbol}
-\index{#-@{\tt\#-} symbol}
-\begin{constants}
-  \it symbol  & \it meta-type & \it priority & \it description \\ 
-  \cdx{nat}     & $i$                   &       & set of natural numbers \\
-  \cdx{nat_case}& $[i,i\To i,i]\To i$     &     & conditional for $nat$\\
-  \cdx{rec}     & $[i,i,[i,i]\To i]\To i$ &     & recursor for $nat$\\
-  \tt \#*       & $[i,i]\To i$  &  Left 70      & multiplication \\
-  \tt div       & $[i,i]\To i$  &  Left 70      & division\\
-  \tt mod       & $[i,i]\To i$  &  Left 70      & modulus\\
-  \tt \#+       & $[i,i]\To i$  &  Left 65      & addition\\
-  \tt \#-       & $[i,i]\To i$  &  Left 65      & subtraction
-\end{constants}
-
-\begin{ttbox}
-\tdx{nat_def}       nat == lfp(lam r: Pow(Inf). {\ttlbrace}0{\ttrbrace} Un {\ttlbrace}succ(x). x:r{\ttrbrace})
-
-\tdx{nat_case_def}  nat_case(a,b,k) == 
-              THE y. k=0 & y=a | (EX x. k=succ(x) & y=b(x))
-
-\tdx{rec_def}       rec(k,a,b) ==  
-              transrec(k, \%n f. nat_case(a, \%m. b(m, f`m), n))
-
-\tdx{add_def}       m#+n    == rec(m, n, \%u v. succ(v))
-\tdx{diff_def}      m#-n    == rec(n, m, \%u v. rec(v, 0, \%x y. x))
-\tdx{mult_def}      m#*n    == rec(m, 0, \%u v. n #+ v)
-\tdx{mod_def}       m mod n == transrec(m, \%j f. if(j:n, j, f`(j#-n)))
-\tdx{div_def}       m div n == transrec(m, \%j f. if(j:n, 0, succ(f`(j#-n))))
-
-
-\tdx{nat_0I}        0 : nat
-\tdx{nat_succI}     n : nat ==> succ(n) : nat
-
-\tdx{nat_induct}        
-    [| n: nat;  P(0);  !!x. [| x: nat;  P(x) |] ==> P(succ(x)) 
-    |] ==> P(n)
-
-\tdx{nat_case_0}    nat_case(a,b,0) = a
-\tdx{nat_case_succ} nat_case(a,b,succ(m)) = b(m)
-
-\tdx{rec_0}         rec(0,a,b) = a
-\tdx{rec_succ}      rec(succ(m),a,b) = b(m, rec(m,a,b))
-
-\tdx{mult_type}     [| m:nat;  n:nat |] ==> m #* n : nat
-\tdx{mult_0}        0 #* n = 0
-\tdx{mult_succ}     succ(m) #* n = n #+ (m #* n)
-\tdx{mult_commute}  [| m:nat; n:nat |] ==> m #* n = n #* m
-\tdx{add_mult_dist} [| m:nat; k:nat |] ==> (m #+ n) #* k = (m #* k){\thinspace}#+{\thinspace}(n #* k)
-\tdx{mult_assoc}
-    [| m:nat;  n:nat;  k:nat |] ==> (m #* n) #* k = m #* (n #* k)
-\tdx{mod_quo_equality}
-    [| 0:n;  m:nat;  n:nat |] ==> (m div n)#*n #+ m mod n = m
-\end{ttbox}
-\caption{The natural numbers} \label{zf-nat}
-\end{figure}
-
-Theory \thydx{Nat} defines the natural numbers and mathematical
-induction, along with a case analysis operator.  The set of natural
-numbers, here called \texttt{nat}, is known in set theory as the ordinal~$\omega$.
-
-Theory \thydx{Arith} defines primitive recursion and goes on to develop
-arithmetic on the natural numbers (Fig.\ts\ref{zf-nat}).  It defines
-addition, multiplication, subtraction, division, and remainder.  Many of
-their properties are proved: commutative, associative and distributive
-laws, identity and cancellation laws, etc.  The most interesting result is
-perhaps the theorem $a \bmod b + (a/b)\times b = a$.  Division and
-remainder are defined by repeated subtraction, which requires well-founded
-rather than primitive recursion; the termination argument relies on the
-divisor's being non-zero.
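-
-To see the recursion equations in action, here is an illustrative calculation
-of $2+n$ (not itself one of the named theorems): unfolding \tdx{add_def} and
-rewriting with \tdx{rec_succ} (twice) and \tdx{rec_0} gives
-\begin{ttbox}
-succ(succ(0)) #+ n  =  rec(succ(succ(0)), n, \%u v. succ(v))
-                    =  succ(succ(rec(0, n, \%u v. succ(v))))
-                    =  succ(succ(n))
-\end{ttbox}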
-
-Theory \thydx{Univ} defines a `universe' $\texttt{univ}(A)$, for
-constructing datatypes such as trees.  This set contains $A$ and the
-natural numbers.  Vitally, it is closed under finite products: ${\tt
-  univ}(A)\times{\tt univ}(A)\subseteq{\tt univ}(A)$.  This theory also
-defines the cumulative hierarchy of axiomatic set theory, which
-traditionally is written $V@\alpha$ for an ordinal~$\alpha$.  The
-`universe' is a simple generalization of~$V@\omega$.
-
-Theory \thydx{QUniv} defines a `universe' ${\tt quniv}(A)$, for
-constructing codatatypes such as streams.  It is analogous to ${\tt
-  univ}(A)$ (and is defined in terms of it) but is closed under the
-non-standard product and sum.
-
-Theory \texttt{Finite} (Figure~\ref{zf-fin}) defines the finite set operator;
-${\tt Fin}(A)$ is the set of all finite sets over~$A$.  The theory employs
-Isabelle's inductive definition package, which proves various rules
-automatically.  The induction rule shown is stronger than the one proved by
-the package.  The theory also defines the set of all finite functions
-between two given sets.
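-
-For instance, \tdx{Fin.emptyI} followed by two applications of \tdx{Fin.consI}
-yields the illustrative instance below, stating that $\{a,b\}$ is finite
-whenever $a$ and~$b$ belong to~$A$:
-\begin{ttbox}
-[| a: A;  b: A |] ==> cons(a, cons(b, 0)) : Fin(A)
-\end{ttbox}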
-
-\begin{figure}
-\begin{ttbox}
-\tdx{Fin.emptyI}      0 : Fin(A)
-\tdx{Fin.consI}       [| a: A;  b: Fin(A) |] ==> cons(a,b) : Fin(A)
-
-\tdx{Fin_induct}
-    [| b: Fin(A);
-       P(0);
-       !!x y. [| x: A;  y: Fin(A);  x~:y;  P(y) |] ==> P(cons(x,y))
-    |] ==> P(b)
-
-\tdx{Fin_mono}        A<=B ==> Fin(A) <= Fin(B)
-\tdx{Fin_UnI}         [| b: Fin(A);  c: Fin(A) |] ==> b Un c : Fin(A)
-\tdx{Fin_UnionI}      C : Fin(Fin(A)) ==> Union(C) : Fin(A)
-\tdx{Fin_subset}      [| c<=b;  b: Fin(A) |] ==> c: Fin(A)
-\end{ttbox}
-\caption{The finite set operator} \label{zf-fin}
-\end{figure}
-
-\begin{figure}
-\begin{constants}
-  \cdx{list}    & $i\To i$      && lists over some set\\
-  \cdx{list_case} & $[i, [i,i]\To i, i] \To i$  && conditional for $list(A)$ \\
-  \cdx{list_rec} & $[i, i, [i,i,i]\To i] \To i$ && recursor for $list(A)$ \\
-  \cdx{map}     & $[i\To i, i] \To i$   &       & mapping functional\\
-  \cdx{length}  & $i\To i$              &       & length of a list\\
-  \cdx{rev}     & $i\To i$              &       & reverse of a list\\
-  \tt \at       & $[i,i]\To i$  &  Right 60     & append for lists\\
-  \cdx{flat}    & $i\To i$   &                  & append of list of lists
-\end{constants}
-
-\underscoreon %%because @ is used here
-\begin{ttbox}
-\tdx{list_rec_def}    list_rec(l,c,h) == 
-                Vrec(l, \%l g. list_case(c, \%x xs. h(x, xs, g`xs), l))
-
-\tdx{map_def}         map(f,l)  == list_rec(l,  0,  \%x xs r. <f(x), r>)
-\tdx{length_def}      length(l) == list_rec(l,  0,  \%x xs r. succ(r))
-\tdx{app_def}         xs@ys     == list_rec(xs, ys, \%x xs r. <x,r>)
-\tdx{rev_def}         rev(l)    == list_rec(l,  0,  \%x xs r. r @ <x,0>)
-\tdx{flat_def}        flat(ls)  == list_rec(ls, 0,  \%l ls r. l @ r)
-
-
-\tdx{NilI}            Nil : list(A)
-\tdx{ConsI}           [| a: A;  l: list(A) |] ==> Cons(a,l) : list(A)
-
-\tdx{List.induct}
-    [| l: list(A);
-       P(Nil);
-       !!x y. [| x: A;  y: list(A);  P(y) |] ==> P(Cons(x,y))
-    |] ==> P(l)
-
-\tdx{Cons_iff}        Cons(a,l)=Cons(a',l') <-> a=a' & l=l'
-\tdx{Nil_Cons_iff}    ~ Nil=Cons(a,l)
-
-\tdx{list_mono}       A<=B ==> list(A) <= list(B)
-
-\tdx{list_rec_Nil}    list_rec(Nil,c,h) = c
-\tdx{list_rec_Cons}   list_rec(Cons(a,l), c, h) = h(a, l, list_rec(l,c,h))
-
-\tdx{map_ident}       l: list(A) ==> map(\%u. u, l) = l
-\tdx{map_compose}     l: list(A) ==> map(h, map(j,l)) = map(\%u. h(j(u)), l)
-\tdx{map_app_distrib} xs: list(A) ==> map(h, xs@ys) = map(h,xs) @ map(h,ys)
-\tdx{map_type}
-    [| l: list(A);  !!x. x: A ==> h(x): B |] ==> map(h,l) : list(B)
-\tdx{map_flat}
-    ls: list(list(A)) ==> map(h, flat(ls)) = flat(map(map(h),ls))
-\end{ttbox}
-\caption{Lists} \label{zf-list}
-\end{figure}
-
-
-Figure~\ref{zf-list} presents the set of lists over~$A$, ${\tt list}(A)$.
-The definition employs Isabelle's datatype package, which defines the
-introduction and induction rules automatically, as well as the constructors
-and case operator (\verb|list_case|).  See file \texttt{ZF/List.ML}.
-The file \texttt{ZF/ListFn.thy} proceeds to define structural
-recursion and the usual list functions.
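-
-For instance, unfolding \tdx{length_def} and rewriting with
-\tdx{list_rec_Cons} and \tdx{list_rec_Nil} gives the following illustrative
-calculation:
-\begin{ttbox}
-length(Cons(a, Nil))  =  list_rec(Cons(a, Nil), 0, \%x xs r. succ(r))
-                      =  succ(list_rec(Nil, 0, \%x xs r. succ(r)))
-                      =  succ(0)
-\end{ttbox}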
-
-The constructions of the natural numbers and lists make use of a suite of
-operators for handling recursive function definitions.  I have described
-the developments in detail elsewhere~\cite{paulson-set-II}.  Here is a brief
-summary:
-\begin{itemize}
-  \item Theory \texttt{Trancl} defines the transitive closure of a relation
-    (as a least fixedpoint).
-
-  \item Theory \texttt{WF} proves the Well-Founded Recursion Theorem, using an
-    elegant approach of Tobias Nipkow.  This theorem permits general
-    recursive definitions within set theory.
-
-  \item Theory \texttt{Ord} defines the notions of transitive set and ordinal
-    number.  It derives transfinite induction.  A key definition is {\bf
-      less than}: $i<j$ if and only if $i$ and $j$ are both ordinals and
-    $i\in j$.  As a special case, it includes less than on the natural
-    numbers.
-    
-  \item Theory \texttt{Epsilon} derives $\varepsilon$-induction and
-    $\varepsilon$-recursion, which are generalisations of transfinite
-    induction and recursion.  It also defines \cdx{rank}$(x)$, which
-    is the least ordinal $\alpha$ such that $x$ is constructed at
-    stage $\alpha$ of the cumulative hierarchy (thus $x\in
-    V@{\alpha+1}$).
-\end{itemize}
-
-Other important theories lead to a theory of cardinal numbers.  They have
-not yet been written up anywhere.  Here is a summary:
-\begin{itemize}
-\item Theory \texttt{Rel} defines the basic properties of relations, such as
-  (ir)reflexivity, (a)symmetry, and transitivity.
-
-\item Theory \texttt{EquivClass} develops a theory of equivalence
-  classes, not using the Axiom of Choice.
-
-\item Theory \texttt{Order} defines partial orderings, total orderings and
-  wellorderings.
-
-\item Theory \texttt{OrderArith} defines orderings on sum and product sets.
-  These can be used to define ordinal arithmetic and have applications to
-  cardinal arithmetic.
-
-\item Theory \texttt{OrderType} defines order types.  Every wellordering is
-  equivalent to a unique ordinal, which is its order type.
-
-\item Theory \texttt{Cardinal} defines equipollence and cardinal numbers.
- 
-\item Theory \texttt{CardinalArith} defines cardinal addition and
-  multiplication, and proves their elementary laws.  It proves that there
-  is no greatest cardinal.  It also proves a deep result, namely
-  $\kappa\otimes\kappa=\kappa$ for every infinite cardinal~$\kappa$; see
-  Kunen~\cite[page 29]{kunen80}.  None of these results assume the Axiom of
-  Choice, which complicates their proofs considerably.  
-\end{itemize}
-
-The following developments involve the Axiom of Choice (AC):
-\begin{itemize}
-\item Theory \texttt{AC} asserts the Axiom of Choice and proves some simple
-  equivalent forms.
-
-\item Theory \texttt{Zorn} proves Hausdorff's Maximal Principle, Zorn's Lemma
-  and the Wellordering Theorem, following Abrial and
-  Laffitte~\cite{abrial93}.
-
-\item Theory \verb|Cardinal_AC| uses AC to prove simplified theorems about
-  the cardinals.  It also proves a theorem needed to justify
-  infinitely branching datatype declarations: if $\kappa$ is an infinite
-  cardinal and $|X(\alpha)| \le \kappa$ for all $\alpha<\kappa$ then
-  $|\union\sb{\alpha<\kappa} X(\alpha)| \le \kappa$.
-
-\item Theory \texttt{InfDatatype} proves theorems to justify infinitely
-  branching datatypes.  Arbitrary index sets are allowed, provided their
-  cardinalities have an upper bound.  The theory also justifies some
-  unusual cases of finite branching, involving the finite powerset operator
-  and the finite function space operator.
-\end{itemize}
-
-
-
-\section{Simplification rules}
-{\ZF} does not merely inherit simplification from \FOL, but modifies it
-extensively.  File \texttt{ZF/simpdata.ML} contains the details.
-
-The extraction of rewrite rules takes set theory primitives into account.
-It can strip bounded universal quantifiers from a formula; for example,
-${\forall x\in A. f(x)=g(x)}$ yields the conditional rewrite rule $x\in A \Imp
-f(x)=g(x)$.  Given $a\in\{x\in A. P(x)\}$ it extracts rewrite rules from
-$a\in A$ and~$P(a)$.  It can also break down $a\in A\int B$ and $a\in A-B$.
-
-The default simplification set contains congruence rules for
-all the binding operators of {\ZF}\@.  It contains all the conversion
-rules, such as \texttt{fst} and \texttt{snd}, as well as the rewrites
-shown in Fig.\ts\ref{zf-simpdata}.  See the file 
-\texttt{ZF/simpdata.ML} for a fuller list.
-
-\begin{figure}
-\begin{eqnarray*}
-  a\in \emptyset        & \bimp &  \bot\\
-  a \in A \un B      & \bimp &  a\in A \disj a\in B\\
-  a \in A \int B      & \bimp &  a\in A \conj a\in B\\
-  a \in A-B             & \bimp &  a\in A \conj \neg (a\in B)\\
-  \pair{a,b}\in {\tt Sigma}(A,B)
-                        & \bimp &  a\in A \conj b\in B(a)\\
-  a \in {\tt Collect}(A,P)      & \bimp &  a\in A \conj P(a)\\
-  (\forall x \in \emptyset. P(x)) & \bimp &  \top\\
-  (\forall x \in A. \top)       & \bimp &  \top
-\end{eqnarray*}
-\caption{Some rewrite rules for set theory} \label{zf-simpdata}
-\end{figure}
-
-
-\section{The examples directories}
-Directory \texttt{ZF/IMP} contains a mechanised version of a semantic
-equivalence proof taken from Winskel~\cite{winskel93}.  It formalises the
-denotational and operational semantics of a simple while-language, then
-proves the two equivalent.  It contains several datatype and inductive
-definitions, and demonstrates their use.
-
-The directory \texttt{ZF/ex} contains further developments in {\ZF} set
-theory.  Here is an overview; see the files themselves for more details.  I
-describe much of this material in other
-publications~\cite{paulson-set-I,paulson-set-II,paulson-CADE}. 
-\begin{itemize}
-\item File \texttt{misc.ML} contains miscellaneous examples such as
-  Cantor's Theorem, the Schr\"oder-Bernstein Theorem and the `Composition
-  of homomorphisms' challenge~\cite{boyer86}.
-
-\item Theory \texttt{Ramsey} proves the finite exponent 2 version of
-  Ramsey's Theorem, following Basin and Kaufmann's
-  presentation~\cite{basin91}.
-
-\item Theory \texttt{Integ} develops a theory of the integers as
-  equivalence classes of pairs of natural numbers.
-
-\item Theory \texttt{Primrec} develops some computation theory.  It
-  inductively defines the set of primitive recursive functions and presents a
-  proof that Ackermann's function is not primitive recursive.
-
-\item Theory \texttt{Primes} defines the Greatest Common Divisor of two
-  natural numbers and the ``divides'' relation.
-
-\item Theory \texttt{Bin} defines a datatype for two's complement binary
-  integers, then proves rewrite rules to perform binary arithmetic.  For
-  instance, $1359\times {-}2468 = {-}3354012$ takes under 14 seconds.
-
-\item Theory \texttt{BT} defines the recursive data structure ${\tt
-    bt}(A)$, labelled binary trees.
-
-\item Theory \texttt{Term} defines a recursive data structure for terms
-  and term lists.  These are simply finite branching trees.
-
-\item Theory \texttt{TF} defines primitives for solving mutually
-  recursive equations over sets.  It constructs sets of trees and forests
-  as an example, including induction and recursion rules that handle the
-  mutual recursion.
-
-\item Theory \texttt{Prop} proves soundness and completeness of
-  propositional logic~\cite{paulson-set-II}.  This illustrates datatype
-  definitions, inductive definitions, structural induction and rule
-  induction.
-
-\item Theory \texttt{ListN} inductively defines the lists of $n$
-  elements~\cite{paulin92}.
-
-\item Theory \texttt{Acc} inductively defines the accessible part of a
-  relation~\cite{paulin92}.
-
-\item Theory \texttt{Comb} defines the datatype of combinators and
-  inductively defines contraction and parallel contraction.  It goes on to
-  prove the Church-Rosser Theorem.  This case study follows Camilleri and
-  Melham~\cite{camilleri92}.
-
-\item Theory \texttt{LList} defines lazy lists and a coinduction
-  principle for proving equations between them.
-\end{itemize}
-
-
-\section{A proof about powersets}\label{sec:ZF-pow-example}
-To demonstrate high-level reasoning about subsets, let us prove the
-equation ${{\tt Pow}(A)\cap {\tt Pow}(B)}= {\tt Pow}(A\cap B)$.  Compared
-with first-order logic, set theory involves a maze of rules, and theorems
-have many different proofs.  Attempting other proofs of the theorem might
-be instructive.  This proof exploits the lattice properties of
-intersection.  It also uses the monotonicity of the powerset operation,
-from \texttt{ZF/mono.ML}:
-\begin{ttbox}
-\tdx{Pow_mono}      A<=B ==> Pow(A) <= Pow(B)
-\end{ttbox}
-We enter the goal and make the first step, which breaks the equation into
-two inclusions by extensionality:\index{*equalityI theorem}
-\begin{ttbox}
-Goal "Pow(A Int B) = Pow(A) Int Pow(B)";
-{\out Level 0}
-{\out Pow(A Int B) = Pow(A) Int Pow(B)}
-{\out  1. Pow(A Int B) = Pow(A) Int Pow(B)}
-\ttbreak
-by (resolve_tac [equalityI] 1);
-{\out Level 1}
-{\out Pow(A Int B) = Pow(A) Int Pow(B)}
-{\out  1. Pow(A Int B) <= Pow(A) Int Pow(B)}
-{\out  2. Pow(A) Int Pow(B) <= Pow(A Int B)}
-\end{ttbox}
-Both inclusions could be tackled straightforwardly using \texttt{subsetI}.
-A shorter proof results from noting that intersection forms the greatest
-lower bound:\index{*Int_greatest theorem}
-\begin{ttbox}
-by (resolve_tac [Int_greatest] 1);
-{\out Level 2}
-{\out Pow(A Int B) = Pow(A) Int Pow(B)}
-{\out  1. Pow(A Int B) <= Pow(A)}
-{\out  2. Pow(A Int B) <= Pow(B)}
-{\out  3. Pow(A) Int Pow(B) <= Pow(A Int B)}
-\end{ttbox}
-Subgoal~1 follows by applying the monotonicity of \texttt{Pow} to $A\int
-B\subseteq A$; subgoal~2 follows similarly:
-\index{*Int_lower1 theorem}\index{*Int_lower2 theorem}
-\begin{ttbox}
-by (resolve_tac [Int_lower1 RS Pow_mono] 1);
-{\out Level 3}
-{\out Pow(A Int B) = Pow(A) Int Pow(B)}
-{\out  1. Pow(A Int B) <= Pow(B)}
-{\out  2. Pow(A) Int Pow(B) <= Pow(A Int B)}
-\ttbreak
-by (resolve_tac [Int_lower2 RS Pow_mono] 1);
-{\out Level 4}
-{\out Pow(A Int B) = Pow(A) Int Pow(B)}
-{\out  1. Pow(A) Int Pow(B) <= Pow(A Int B)}
-\end{ttbox}
-We are left with the opposite inclusion, which we tackle in the
-straightforward way:\index{*subsetI theorem}
-\begin{ttbox}
-by (resolve_tac [subsetI] 1);
-{\out Level 5}
-{\out Pow(A Int B) = Pow(A) Int Pow(B)}
-{\out  1. !!x. x : Pow(A) Int Pow(B) ==> x : Pow(A Int B)}
-\end{ttbox}
-The subgoal is to show $x\in {\tt Pow}(A\cap B)$ assuming $x\in{\tt
-Pow}(A)\cap {\tt Pow}(B)$; eliminating this assumption produces two
-subgoals.  The rule \tdx{IntE} treats the intersection like a conjunction
-instead of unfolding its definition.
-\begin{ttbox}
-by (eresolve_tac [IntE] 1);
-{\out Level 6}
-{\out Pow(A Int B) = Pow(A) Int Pow(B)}
-{\out  1. !!x. [| x : Pow(A); x : Pow(B) |] ==> x : Pow(A Int B)}
-\end{ttbox}
-The next step replaces the \texttt{Pow} by the subset
-relation~($\subseteq$).\index{*PowI theorem}
-\begin{ttbox}
-by (resolve_tac [PowI] 1);
-{\out Level 7}
-{\out Pow(A Int B) = Pow(A) Int Pow(B)}
-{\out  1. !!x. [| x : Pow(A); x : Pow(B) |] ==> x <= A Int B}
-\end{ttbox}
-We perform the same replacement in the assumptions.  This is a good
-demonstration of the tactic \ttindex{dresolve_tac}:\index{*PowD theorem}
-\begin{ttbox}
-by (REPEAT (dresolve_tac [PowD] 1));
-{\out Level 8}
-{\out Pow(A Int B) = Pow(A) Int Pow(B)}
-{\out  1. !!x. [| x <= A; x <= B |] ==> x <= A Int B}
-\end{ttbox}
-The assumptions are that $x$ is a lower bound of both $A$ and~$B$, but
-$A\int B$ is the greatest lower bound:\index{*Int_greatest theorem}
-\begin{ttbox}
-by (resolve_tac [Int_greatest] 1);
-{\out Level 9}
-{\out Pow(A Int B) = Pow(A) Int Pow(B)}
-{\out  1. !!x. [| x <= A; x <= B |] ==> x <= A}
-{\out  2. !!x. [| x <= A; x <= B |] ==> x <= B}
-\end{ttbox}
-To conclude the proof, we clear up the trivial subgoals:
-\begin{ttbox}
-by (REPEAT (assume_tac 1));
-{\out Level 10}
-{\out Pow(A Int B) = Pow(A) Int Pow(B)}
-{\out No subgoals!}
-\end{ttbox}
-\medskip
-We could have performed this proof in one step by applying
-\ttindex{Blast_tac}.  Let us
-go back to the start:
-\begin{ttbox}
-choplev 0;
-{\out Level 0}
-{\out Pow(A Int B) = Pow(A) Int Pow(B)}
-{\out  1. Pow(A Int B) = Pow(A) Int Pow(B)}
-by (Blast_tac 1);
-{\out Depth = 0}
-{\out Depth = 1}
-{\out Depth = 2}
-{\out Depth = 3}
-{\out Level 1}
-{\out Pow(A Int B) = Pow(A) Int Pow(B)}
-{\out No subgoals!}
-\end{ttbox}
-Past researchers regarded this as a difficult proof, as indeed it is if all
-the symbols are replaced by their definitions.
-\goodbreak
-
-\section{Monotonicity of the union operator}
-For another example, we prove that general union is monotonic:
-${C\subseteq D}$ implies $\bigcup(C)\subseteq \bigcup(D)$.  To begin, we
-tackle the inclusion using \tdx{subsetI}:
-\begin{ttbox}
-Goal "C<=D ==> Union(C) <= Union(D)";
-{\out Level 0}
-{\out C <= D ==> Union(C) <= Union(D)}
-{\out  1. C <= D ==> Union(C) <= Union(D)}
-\ttbreak
-by (resolve_tac [subsetI] 1);
-{\out Level 1}
-{\out C <= D ==> Union(C) <= Union(D)}
-{\out  1. !!x. [| C <= D; x : Union(C) |] ==> x : Union(D)}
-\end{ttbox}
-Big union is like an existential quantifier --- the occurrence in the
-assumptions must be eliminated early, since it creates parameters.
-\index{*UnionE theorem}
-\begin{ttbox}
-by (eresolve_tac [UnionE] 1);
-{\out Level 2}
-{\out C <= D ==> Union(C) <= Union(D)}
-{\out  1. !!x B. [| C <= D; x : B; B : C |] ==> x : Union(D)}
-\end{ttbox}
-Now we may apply \tdx{UnionI}, which creates an unknown involving the
-parameters.  To show $x\in \bigcup(D)$ it suffices to show that $x$ belongs
-to some element, say~$\Var{B2}(x,B)$, of~$D$.
-\begin{ttbox}
-by (resolve_tac [UnionI] 1);
-{\out Level 3}
-{\out C <= D ==> Union(C) <= Union(D)}
-{\out  1. !!x B. [| C <= D; x : B; B : C |] ==> ?B2(x,B) : D}
-{\out  2. !!x B. [| C <= D; x : B; B : C |] ==> x : ?B2(x,B)}
-\end{ttbox}
-Combining \tdx{subsetD} with the assumption $C\subseteq D$ yields 
-$\Var{a}\in C \Imp \Var{a}\in D$, which reduces subgoal~1.  Note that
-\texttt{eresolve_tac} has removed that assumption.
-\begin{ttbox}
-by (eresolve_tac [subsetD] 1);
-{\out Level 4}
-{\out C <= D ==> Union(C) <= Union(D)}
-{\out  1. !!x B. [| x : B; B : C |] ==> ?B2(x,B) : C}
-{\out  2. !!x B. [| C <= D; x : B; B : C |] ==> x : ?B2(x,B)}
-\end{ttbox}
-The rest is routine.  Observe how~$\Var{B2}(x,B)$ is instantiated.
-\begin{ttbox}
-by (assume_tac 1);
-{\out Level 5}
-{\out C <= D ==> Union(C) <= Union(D)}
-{\out  1. !!x B. [| C <= D; x : B; B : C |] ==> x : B}
-by (assume_tac 1);
-{\out Level 6}
-{\out C <= D ==> Union(C) <= Union(D)}
-{\out No subgoals!}
-\end{ttbox}
-Again, \ttindex{Blast_tac} can prove the theorem in one step.
-\begin{ttbox}
-by (Blast_tac 1);
-{\out Depth = 0}
-{\out Depth = 1}
-{\out Depth = 2}
-{\out Level 1}
-{\out C <= D ==> Union(C) <= Union(D)}
-{\out No subgoals!}
-\end{ttbox}
-
-The file \texttt{ZF/equalities.ML} has many similar proofs.  Reasoning about
-general intersection can be difficult because of its anomalous behaviour on
-the empty set.  However, \ttindex{Blast_tac} copes well with these.  Here is
-a typical example, borrowed from Devlin~\cite[page 12]{devlin79}:
-\begin{ttbox}
-a:C ==> (INT x:C. A(x) Int B(x)) = (INT x:C. A(x)) Int (INT x:C. B(x))
-\end{ttbox}
-In traditional notation this is
-\[ a\in C \,\Imp\, \inter@{x\in C} \Bigl(A(x) \int B(x)\Bigr) =        
-       \Bigl(\inter@{x\in C} A(x)\Bigr)  \int  
-       \Bigl(\inter@{x\in C} B(x)\Bigr)  \]
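-
-A plausible script for this fact, in the style of the sessions shown above, is
-simply to state the goal and invoke the classical reasoner (output omitted):
-\begin{ttbox}
-Goal "a:C ==> (INT x:C. A(x) Int B(x)) = (INT x:C. A(x)) Int (INT x:C. B(x))";
-by (Blast_tac 1);
-\end{ttbox}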
-
-\section{Low-level reasoning about functions}
-The derived rules \texttt{lamI}, \texttt{lamE}, \texttt{lam_type}, \texttt{beta}
-and \texttt{eta} support reasoning about functions in a
-$\lambda$-calculus style.  This is generally easier than regarding
-functions as sets of ordered pairs.  But sometimes we must look at the
-underlying representation, as in the following proof
-of~\tdx{fun_disjoint_apply1}.  This states that if $f$ and~$g$ are
-functions with disjoint domains~$A$ and~$C$, and if $a\in A$, then
-$(f\un g)`a = f`a$:
-\begin{ttbox}
-Goal "[| a:A;  f: A->B;  g: C->D;  A Int C = 0 |] ==>  \ttback
-\ttback    (f Un g)`a = f`a";
-{\out Level 0}
-{\out [| a : A; f : A -> B; g : C -> D; A Int C = 0 |]}
-{\out ==> (f Un g) ` a = f ` a}
-{\out  1. [| a : A; f : A -> B; g : C -> D; A Int C = 0 |]}
-{\out     ==> (f Un g) ` a = f ` a}
-\end{ttbox}
-Using \tdx{apply_equality}, we reduce the equality to reasoning about
-ordered pairs.  The second subgoal is to verify that $f\un g$ is a function.
-To save space, the assumptions will be abbreviated below.
-\begin{ttbox}
-by (resolve_tac [apply_equality] 1);
-{\out Level 1}
-{\out [| \ldots |] ==> (f Un g) ` a = f ` a}
-{\out  1. [| \ldots |] ==> <a,f ` a> : f Un g}
-{\out  2. [| \ldots |] ==> f Un g : (PROD x:?A. ?B(x))}
-\end{ttbox}
-We must show that the pair belongs to~$f$ or~$g$; by~\tdx{UnI1} we
-choose~$f$:
-\begin{ttbox}
-by (resolve_tac [UnI1] 1);
-{\out Level 2}
-{\out [| \ldots |] ==> (f Un g) ` a = f ` a}
-{\out  1. [| \ldots |] ==> <a,f ` a> : f}
-{\out  2. [| \ldots |] ==> f Un g : (PROD x:?A. ?B(x))}
-\end{ttbox}
-To show $\pair{a,f`a}\in f$ we use \tdx{apply_Pair}, which is
-essentially the converse of \tdx{apply_equality}:
-\begin{ttbox}
-by (resolve_tac [apply_Pair] 1);
-{\out Level 3}
-{\out [| \ldots |] ==> (f Un g) ` a = f ` a}
-{\out  1. [| \ldots |] ==> f : (PROD x:?A2. ?B2(x))}
-{\out  2. [| \ldots |] ==> a : ?A2}
-{\out  3. [| \ldots |] ==> f Un g : (PROD x:?A. ?B(x))}
-\end{ttbox}
-Using the assumptions $f\in A\to B$ and $a\in A$, we solve the two subgoals
-from \tdx{apply_Pair}.  Recall that a $\Pi$-set is merely a generalized
-function space, and observe that~{\tt?A2} is instantiated to~\texttt{A}.
-\begin{ttbox}
-by (assume_tac 1);
-{\out Level 4}
-{\out [| \ldots |] ==> (f Un g) ` a = f ` a}
-{\out  1. [| \ldots |] ==> a : A}
-{\out  2. [| \ldots |] ==> f Un g : (PROD x:?A. ?B(x))}
-by (assume_tac 1);
-{\out Level 5}
-{\out [| \ldots |] ==> (f Un g) ` a = f ` a}
-{\out  1. [| \ldots |] ==> f Un g : (PROD x:?A. ?B(x))}
-\end{ttbox}
-To construct functions of the form $f\un g$, we apply
-\tdx{fun_disjoint_Un}:
-\begin{ttbox}
-by (resolve_tac [fun_disjoint_Un] 1);
-{\out Level 6}
-{\out [| \ldots |] ==> (f Un g) ` a = f ` a}
-{\out  1. [| \ldots |] ==> f : ?A3 -> ?B3}
-{\out  2. [| \ldots |] ==> g : ?C3 -> ?D3}
-{\out  3. [| \ldots |] ==> ?A3 Int ?C3 = 0}
-\end{ttbox}
-The remaining subgoals are instances of the assumptions.  Again, observe how
-unknowns are instantiated:
-\begin{ttbox}
-by (assume_tac 1);
-{\out Level 7}
-{\out [| \ldots |] ==> (f Un g) ` a = f ` a}
-{\out  1. [| \ldots |] ==> g : ?C3 -> ?D3}
-{\out  2. [| \ldots |] ==> A Int ?C3 = 0}
-by (assume_tac 1);
-{\out Level 8}
-{\out [| \ldots |] ==> (f Un g) ` a = f ` a}
-{\out  1. [| \ldots |] ==> A Int C = 0}
-by (assume_tac 1);
-{\out Level 9}
-{\out [| \ldots |] ==> (f Un g) ` a = f ` a}
-{\out No subgoals!}
-\end{ttbox}
-See the files \texttt{ZF/func.ML} and \texttt{ZF/WF.ML} for more
-examples of reasoning about functions.
-
-\index{set theory|)}
--- a/doc-src/Logics/intro.tex	Fri Jan 08 13:20:59 1999 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,150 +0,0 @@
-%% $Id$
-\chapter{Basic Concepts}
-Several logics come with Isabelle.  Many of them are sufficiently developed
-to serve as comfortable reasoning environments.  They are also good
-starting points for defining new logics.  Each logic is distributed with
-sample proofs, some of which are described in this document.
-
-\begin{ttdescription}
-\item[\thydx{FOL}] is many-sorted first-order logic with natural
-deduction.  It comes in both constructive and classical versions.
-
-\item[\thydx{ZF}] is axiomatic set theory, using the Zermelo-Fraenkel
-axioms~\cite{suppes72}.  It is built upon classical~\FOL{}.
-
-\item[\thydx{CCL}] is Martin Coen's Classical Computational Logic,
-  which is the basis of a preliminary method for deriving programs from
-  proofs~\cite{coen92}.  It is built upon classical~\FOL{}.
- 
-\item[\thydx{LCF}] is a version of Scott's Logic for Computable
-  Functions, which is also implemented by the~{\sc lcf}
-  system~\cite{paulson87}.  It is built upon classical~\FOL{}.
-
-\item[\thydx{HOL}] is the higher-order logic of Church~\cite{church40},
-which is also implemented by Gordon's~{\sc hol} system~\cite{mgordon-hol}.
-This object-logic should not be confused with Isabelle's meta-logic, which is
-also a form of higher-order logic.
-
-\item[\thydx{HOLCF}] is a version of {\sc lcf}, defined as an
-  extension of {\tt HOL}\@.
- 
-\item[\thydx{CTT}] is a version of Martin-L\"of's Constructive Type
-Theory~\cite{nordstrom90}, with extensional equality.  Universes are not
-included.
- 
-\item[\thydx{LK}] is another version of first-order logic, a classical
-sequent calculus.  Sequents have the form $A@1,\ldots,A@m\turn
-B@1,\ldots,B@n$; rules are applied using associative matching.
-
-\item[\thydx{Modal}] implements the modal logics $T$, $S4$,
-  and~$S43$.  It is built upon~\LK{}.
-
-\item[\thydx{Cube}] is Barendregt's $\lambda$-cube.
-\end{ttdescription}
-The logics {\tt CCL}, {\tt LCF}, {\tt HOLCF}, {\tt Modal} and {\tt
-  Cube} are currently undocumented.  All object-logics' sources are
-distributed with Isabelle (see the directory \texttt{src}).  They are
-also available for browsing on the WWW at:
-\begin{ttbox}
-http://www4.informatik.tu-muenchen.de/~nipkow/isabelle/
-\end{ttbox}
-Note that this is not necessarily consistent with your local sources!
-
-\medskip You should not read this manual before reading {\em
-  Introduction to Isabelle\/} and performing some Isabelle proofs.
-Consult the {\em Reference Manual} for more information on tactics,
-packages, etc.
-
-
-\section{Syntax definitions}
-The syntax of each logic is presented using a context-free grammar.
-These grammars obey the following conventions:
-\begin{itemize}
-\item identifiers denote nonterminal symbols
-\item {\tt typewriter} font denotes terminal symbols
-\item parentheses $(\ldots)$ express grouping
-\item constructs followed by a Kleene star, such as $id^*$ and $(\ldots)^*$,
-can be repeated~0 or more times 
-\item alternatives are separated by a vertical bar,~$|$
-\item the symbol for alphanumeric identifiers is~{\it id\/} 
-\item the symbol for scheme variables is~{\it var}
-\end{itemize}
-To reduce the number of nonterminals and grammar rules required, Isabelle's
-syntax module employs {\bf priorities},\index{priorities} or precedences.
-Each grammar rule is given by a mixfix declaration, which has a priority,
-and each argument place has a priority.  This general approach handles
-infix operators that associate either to the left or to the right, as well
-as prefix and binding operators.
-
-In a syntactically valid expression, an operator's arguments never involve
-an operator of lower priority unless brackets are used.  Consider
-first-order logic, where $\exists$ has lower priority than $\disj$,
-which has lower priority than $\conj$.  There, $P\conj Q \disj R$
-abbreviates $(P\conj Q) \disj R$ rather than $P\conj (Q\disj R)$.  Also,
-$\exists x.P\disj Q$ abbreviates $\exists x.(P\disj Q)$ rather than
-$(\exists x.P)\disj Q$.  Note especially that $P\disj(\exists x.Q)$
-becomes syntactically invalid if the brackets are removed.
-
-A {\bf binder} is a symbol associated with a constant of type
-$(\sigma\To\tau)\To\tau'$.  For instance, we may declare~$\forall$ as
-a binder for the constant~$All$, which has type $(\alpha\To o)\To o$.
-This defines the syntax $\forall x.t$ to mean $All(\lambda x.t)$.  We
-can also write $\forall x@1\ldots x@m.t$ to abbreviate $\forall x@1.
-\ldots \forall x@m.t$; this is possible for any constant provided that
-$\tau$ and $\tau'$ are the same type.  \HOL's description operator
-$\varepsilon x.P\,x$ has type $(\alpha\To bool)\To\alpha$ and can bind
-only one variable, except when $\alpha$ is $bool$.  \ZF's bounded
-quantifier $\forall x\in A.P(x)$ cannot be declared as a binder
-because it has type $[i, i\To o]\To o$.  The syntax for binders allows
-type constraints on bound variables, as in
-\[ \forall (x{::}\alpha) \; (y{::}\beta) \; z{::}\gamma. Q(x,y,z) \]
-
-To avoid excess detail, the logic descriptions adopt a semi-formal style.
-Infix operators and binding operators are listed in separate tables, which
-include their priorities.  Grammar descriptions do not include numeric
-priorities; instead, the rules appear in order of decreasing priority.
-This should suffice for most purposes; for full details, please consult the
-actual syntax definitions in the {\tt.thy} files.
-
-Each nonterminal symbol is associated with some Isabelle type.  For
-example, the formulae of first-order logic have type~$o$.  Every
-Isabelle expression of type~$o$ is therefore a formula.  These include
-atomic formulae such as $P$, where $P$ is a variable of type~$o$, and more
-generally expressions such as $P(t,u)$, where $P$, $t$ and~$u$ have
-suitable types.  Therefore, `expression of type~$o$' is listed as a
-separate possibility in the grammar for formulae.
-
-
-\section{Proof procedures}\label{sec:safe}
-Most object-logics come with simple proof procedures.  These are reasonably
-powerful for interactive use, though often simplistic and incomplete.  You
-can do single-step proofs using \verb|resolve_tac| and
-\verb|assume_tac|, referring to the inference rules of the logic by {\sc
-ml} identifiers.
-
-For theorem proving, rules can be classified as {\bf safe} or {\bf unsafe}.
-A rule is safe if applying it to a provable goal always yields provable
-subgoals.  If a rule is safe then it can be applied automatically to a goal
-without destroying our chances of finding a proof.  For instance, all the
-rules of the classical sequent calculus {\sc lk} are safe.  Universal
-elimination is unsafe if the formula $\all{x}P(x)$ is deleted after use.
-Other unsafe rules include the following:
-\[ \infer[({\disj}I1)]{P\disj Q}{P} \qquad 
-   \infer[({\imp}E)]{Q}{P\imp Q & P} \qquad
-   \infer[({\exists}I)]{\exists x.P}{P[t/x]} 
-\]
-
-Proof procedures use safe rules whenever possible, delaying the application
-of unsafe rules.  Those safe rules are preferred that generate the fewest
-subgoals.  Safe rules are (by definition) deterministic, while the unsafe
-rules require search.  The design of a suitable set of rules can be as
-important as the strategy for applying them.
-
-Many of the proof procedures use backtracking.  Typically they attempt to
-solve subgoal~$i$ by repeatedly applying a certain tactic to it.  This
-tactic, which is known as a {\bf step tactic}, resolves a selection of
-rules with subgoal~$i$.  This may replace one subgoal by many;  the
-search persists until there are fewer subgoals in total than at the start.
-Backtracking happens when the search reaches a dead end: when the step
-tactic fails.  Alternative outcomes are then searched by a depth-first or
-best-first strategy.  
--- a/doc-src/Logics/logics.bbl	Fri Jan 08 13:20:59 1999 +0100
+++ b/doc-src/Logics/logics.bbl	Fri Jan 08 14:02:04 1999 +0100
@@ -1,36 +1,11 @@
 \begin{thebibliography}{10}
 
-\bibitem{abrial93}
-J.~R. Abrial and G.~Laffitte.
-\newblock Towards the mechanization of the proofs of some classical theorems of
-  set theory.
-\newblock preprint, February 1993.
-
 \bibitem{andrews86}
 Peter~B. Andrews.
 \newblock {\em An Introduction to Mathematical Logic and Type Theory: To Truth
   Through Proof}.
 \newblock Academic Press, 1986.
 
-\bibitem{basin91}
-David Basin and Matt Kaufmann.
-\newblock The {Boyer-Moore} prover and {Nuprl}: An experimental comparison.
-\newblock In {G\'erard} Huet and Gordon Plotkin, editors, {\em Logical
-  Frameworks}, pages 89--119. Cambridge University Press, 1991.
-
-\bibitem{boyer86}
-Robert Boyer, Ewing Lusk, William McCune, Ross Overbeek, Mark Stickel, and
-  Lawrence Wos.
-\newblock Set theory in first-order logic: Clauses for {G\"{o}del's} axioms.
-\newblock {\em Journal of Automated Reasoning}, 2(3):287--327, 1986.
-
-\bibitem{camilleri92}
-J.~Camilleri and T.~F. Melham.
-\newblock Reasoning with inductively defined relations in the {HOL} theorem
-  prover.
-\newblock Technical Report 265, Computer Laboratory, University of Cambridge,
-  August 1992.
-
 \bibitem{church40}
 Alonzo Church.
 \newblock A formulation of the simple theory of types.
@@ -48,26 +23,6 @@
   System}.
 \newblock Prentice-Hall, 1986.
 
-\bibitem{davey&priestley}
-B.~A. Davey and H.~A. Priestley.
-\newblock {\em Introduction to Lattices and Order}.
-\newblock Cambridge University Press, 1990.
-
-\bibitem{devlin79}
-Keith~J. Devlin.
-\newblock {\em Fundamentals of Contemporary Set Theory}.
-\newblock Springer, 1979.
-
-\bibitem{dummett}
-Michael Dummett.
-\newblock {\em Elements of Intuitionism}.
-\newblock Oxford University Press, 1977.
-
-\bibitem{dyckhoff}
-Roy Dyckhoff.
-\newblock Contraction-free sequent calculi for intuitionistic logic.
-\newblock {\em Journal of Symbolic Logic}, 57(3):795--807, 1992.
-
 \bibitem{felty91a}
 Amy Felty.
 \newblock A logic program for transforming sequent proofs to natural deduction
@@ -93,22 +48,12 @@
   Order Logic}.
 \newblock Cambridge University Press, 1993.
 
-\bibitem{halmos60}
-Paul~R. Halmos.
-\newblock {\em Naive Set Theory}.
-\newblock Van Nostrand, 1960.
-
 \bibitem{huet78}
 G.~P. Huet and B.~Lang.
 \newblock Proving and applying program transformations expressed with
   second-order patterns.
 \newblock {\em Acta Informatica}, 11:31--55, 1978.
 
-\bibitem{kunen80}
-Kenneth Kunen.
-\newblock {\em Set Theory: An Introduction to Independence Proofs}.
-\newblock North-Holland, 1980.
-
 \bibitem{alf}
 Lena Magnusson and Bengt {Nordstr\"{o}m}.
 \newblock The {ALF} proof editor and its proof engine.
@@ -140,8 +85,8 @@
 Wolfgang Naraschewski and Markus Wenzel.
 \newblock Object-oriented verification based on record subtyping in
   higher-order logic.
-\newblock In J.~Grundy and M.~Newey, editors, {\em Theorem Proving in Higher
-  Order Logics: {TPHOLs} '98}, LNCS 1479, pages 349--366, 1998.
+\newblock In Jim Grundy and Malcolm Newey, editors, {\em Theorem Proving in
+  Higher Order Logics: {TPHOLs} '98}, LNCS 1479, pages 349--366, 1998.
 
 \bibitem{nazareth-nipkow}
 Dieter Nazareth and Tobias Nipkow.
@@ -162,22 +107,11 @@
   Technology and Theoretical Computer Science}, volume 1180 of {\em LNCS},
   pages 180--192. Springer, 1996.
 
-\bibitem{noel}
-Philippe No{\"e}l.
-\newblock Experimenting with {Isabelle} in {ZF} set theory.
-\newblock {\em Journal of Automated Reasoning}, 10(1):15--58, 1993.
-
 \bibitem{nordstrom90}
 Bengt {Nordstr\"om}, Kent Petersson, and Jan Smith.
 \newblock {\em Programming in {Martin-L\"of}'s Type Theory. An Introduction}.
 \newblock Oxford University Press, 1990.
 
-\bibitem{paulin92}
-Christine Paulin-Mohring.
-\newblock Inductive definitions in the system {Coq}: Rules and properties.
-\newblock Research Report 92-49, LIP, Ecole Normale Sup\'erieure de Lyon,
-  December 1992.
-
 \bibitem{paulson85}
 Lawrence~C. Paulson.
 \newblock Verifying the unification algorithm in {LCF}.
@@ -188,24 +122,12 @@
 \newblock {\em Logic and Computation: Interactive proof with Cambridge LCF}.
 \newblock Cambridge University Press, 1987.
 
-\bibitem{paulson-set-I}
-Lawrence~C. Paulson.
-\newblock Set theory for verification: {I}. {From} foundations to functions.
-\newblock {\em Journal of Automated Reasoning}, 11(3):353--389, 1993.
-
 \bibitem{paulson-CADE}
 Lawrence~C. Paulson.
 \newblock A fixedpoint approach to implementing (co)inductive definitions.
 \newblock In Alan Bundy, editor, {\em Automated Deduction --- {CADE}-12
   International Conference}, LNAI 814, pages 148--161. Springer, 1994.
 
-\bibitem{paulson-final}
-Lawrence~C. Paulson.
-\newblock A concrete final coalgebra theorem for {ZF} set theory.
-\newblock In Peter Dybjer, Bengt Nordstr{\"om}, and Jan Smith, editors, {\em
-  Types for Proofs and Programs: International Workshop {TYPES '94}}, LNCS 996,
-  pages 120--139. Springer, 1995.
-
 \bibitem{paulson-set-II}
 Lawrence~C. Paulson.
 \newblock Set theory for verification: {II}. {Induction} and recursion.
@@ -229,6 +151,11 @@
 \newblock In {\em 10th Computer Security Foundations Workshop}, pages 70--83.
   IEEE Computer Society Press, 1997.
 
+\bibitem{isabelle-ZF}
+Lawrence~C. Paulson.
+\newblock {Isabelle}'s logics: {FOL} and {ZF}.
+\newblock Technical report, Computer Laboratory, University of Cambridge, 1999.
+
 \bibitem{paulson-COLOG}
 Lawrence~C. Paulson.
 \newblock A formulation of the simple theory of types (for {Isabelle}).
@@ -247,21 +174,11 @@
 \newblock A sequent-style model elimination strategy and a positive refinement.
 \newblock {\em Journal of Automated Reasoning}, 6(4):389--402, 1990.
 
-\bibitem{quaife92}
-Art Quaife.
-\newblock Automated deduction in {von Neumann-Bernays-G\"{o}del} set theory.
-\newblock {\em Journal of Automated Reasoning}, 8(1):91--147, 1992.
-
 \bibitem{slind-tfl}
 Konrad Slind.
 \newblock Function definition in higher-order logic.
 \newblock In von Wright et~al. \cite{tphols96}.
 
-\bibitem{suppes72}
-Patrick Suppes.
-\newblock {\em Axiomatic Set Theory}.
-\newblock Dover, 1972.
-
 \bibitem{takeuti87}
 G.~Takeuti.
 \newblock {\em Proof Theory}.
@@ -277,12 +194,6 @@
 \newblock {\em Theorem Proving in Higher Order Logics: {TPHOLs} '96}, LNCS
   1125, 1996.
 
-\bibitem{principia}
-A.~N. Whitehead and B.~Russell.
-\newblock {\em Principia Mathematica}.
-\newblock Cambridge University Press, 1962.
-\newblock Paperback edition to *56, abridged from the 2nd edition (1927).
-
 \bibitem{winskel93}
 Glynn Winskel.
 \newblock {\em The Formal Semantics of Programming Languages}.
--- a/doc-src/Logics/logics.ind	Fri Jan 08 13:20:59 1999 +0100
+++ b/doc-src/Logics/logics.ind	Fri Jan 08 14:02:04 1999 +0100
@@ -1,960 +1,659 @@
 \begin{theindex}
 
-  \item {\tt !} symbol, 60, 62, 69, 70, 82
-  \item {\tt[]} symbol, 82
-  \item {\tt\#} symbol, 82
-  \item {\tt\#*} symbol, 48, 137
-  \item {\tt\#+} symbol, 48, 137
-  \item {\tt\#-} symbol, 48
-  \item {\tt\&} symbol, 7, 60, 114
-  \item {\tt *} symbol, 27, 61, 79, 128
-  \item {\tt *} type, 77
-  \item {\tt +} symbol, 44, 61, 79, 128
-  \item {\tt +} type, 77
-  \item {\tt -} symbol, 26, 61, 79, 137
-  \item {\tt -->} symbol, 7, 60, 114, 128
-  \item {\tt ->} symbol, 27
-  \item {\tt -``} symbol, 26
-  \item {\tt :} symbol, 26, 68
-  \item {\tt <} constant, 80
-  \item {\tt <} symbol, 79
-  \item {\tt <->} symbol, 7, 114
-  \item {\tt <=} constant, 80
-  \item {\tt <=} symbol, 26, 68
-  \item {\tt =} symbol, 7, 60, 114, 128
-  \item {\tt ?} symbol, 60, 62, 69, 70
-  \item {\tt ?!} symbol, 60
-  \item {\tt\at} symbol, 60, 82
-  \item {\tt `} symbol, 26, 128
-  \item {\tt ``} symbol, 26, 68
-  \item \verb'{}' symbol, 68
-  \item {\tt |} symbol, 7, 60, 114
-  \item {\tt |-|} symbol, 137
-
-  \indexspace
-
-  \item {\tt 0} constant, 26, 79, 126
+  \item {\tt !} symbol, 6, 8, 15, 16, 28
+  \item {\tt[]} symbol, 28
+  \item {\tt\#} symbol, 28
+  \item {\tt\#*} symbol, 84
+  \item {\tt\#+} symbol, 84
+  \item {\tt\&} symbol, 6, 60
+  \item {\tt *} symbol, 7, 25, 75
+  \item {\tt *} type, 23
+  \item {\tt +} symbol, 7, 25, 75
+  \item {\tt +} type, 23
+  \item {\tt -} symbol, 7, 25, 84
+  \item {\tt -->} symbol, 6, 60, 75
+  \item {\tt :} symbol, 14
+  \item {\tt <} constant, 26
+  \item {\tt <} symbol, 25
+  \item {\tt <->} symbol, 60
+  \item {\tt <=} constant, 26
+  \item {\tt <=} symbol, 14
+  \item {\tt =} symbol, 6, 60, 75
+  \item {\tt ?} symbol, 6, 8, 15, 16
+  \item {\tt ?!} symbol, 6
+  \item {\tt\at} symbol, 6, 28
+  \item {\tt `} symbol, 75
+  \item {\tt ``} symbol, 14
+  \item \verb'{}' symbol, 14
+  \item {\tt |} symbol, 6, 60
+  \item {\tt |-|} symbol, 84
 
   \indexspace
 
-  \item {\tt absdiff_def} theorem, 137
-  \item {\tt add_assoc} theorem, 137
-  \item {\tt add_commute} theorem, 137
-  \item {\tt add_def} theorem, 48, 137
-  \item {\tt add_inverse_diff} theorem, 137
-  \item {\tt add_mp_tac}, \bold{135}
-  \item {\tt add_mult_dist} theorem, 48, 137
-  \item {\tt add_safes}, \bold{120}
-  \item {\tt add_typing} theorem, 137
-  \item {\tt add_unsafes}, \bold{120}
-  \item {\tt addC0} theorem, 137
-  \item {\tt addC_succ} theorem, 137
-  \item {\tt Addsplits}, \bold{76}
-  \item {\tt addsplits}, \bold{76}, 81, 93
-  \item {\tt ALL} symbol, 7, 27, 60, 62, 69, 70, 114
-  \item {\tt All} constant, 7, 60, 114
-  \item {\tt All_def} theorem, 64
-  \item {\tt all_dupE} theorem, 5, 9, 66
-  \item {\tt all_impE} theorem, 9
-  \item {\tt allE} theorem, 5, 9, 66
-  \item {\tt allI} theorem, 8, 66
-  \item {\tt allL} theorem, 116, 120
-  \item {\tt allL_thin} theorem, 117
-  \item {\tt allR} theorem, 116
-  \item {\tt and_def} theorem, 43, 64
-  \item {\tt app_def} theorem, 50
-  \item {\tt apply_def} theorem, 32
-  \item {\tt apply_equality} theorem, 40, 41, 57, 58
-  \item {\tt apply_equality2} theorem, 40
-  \item {\tt apply_iff} theorem, 40
-  \item {\tt apply_Pair} theorem, 40, 58
-  \item {\tt apply_type} theorem, 40
-  \item {\tt arg_cong} theorem, 65
-  \item {\tt Arith} theory, 47, 80, 136
-  \item assumptions
-    \subitem contradictory, 16
-    \subitem in {\CTT}, 125, 135
+  \item {\tt 0} constant, 25, 73
 
   \indexspace
 
-  \item {\tt Ball} constant, 26, 30, 68, 70
-  \item {\tt ball_cong} theorem, 33, 34
-  \item {\tt Ball_def} theorem, 31, 71
-  \item {\tt ballE} theorem, 33, 34, 72
-  \item {\tt ballI} theorem, 34, 72
-  \item {\tt basic} theorem, 116
-  \item {\tt basic_defs}, \bold{133}
-  \item {\tt best_tac}, \bold{121}
-  \item {\tt beta} theorem, 40, 41
-  \item {\tt Bex} constant, 26, 30, 68, 70
-  \item {\tt bex_cong} theorem, 33, 34
-  \item {\tt Bex_def} theorem, 31, 71
-  \item {\tt bexCI} theorem, 34, 70, 72
-  \item {\tt bexE} theorem, 34, 72
-  \item {\tt bexI} theorem, 34, 70, 72
-  \item {\tt bij} constant, 46
-  \item {\tt bij_converse_bij} theorem, 46
-  \item {\tt bij_def} theorem, 46
-  \item {\tt bij_disjoint_Un} theorem, 46
-  \item {\tt Blast_tac}, 17, 55, 56
-  \item {\tt blast_tac}, 18, 19, 21
-  \item {\tt bnd_mono_def} theorem, 45
-  \item {\tt Bool} theory, 41
-  \item {\textit {bool}} type, 61
-  \item {\tt bool_0I} theorem, 43
-  \item {\tt bool_1I} theorem, 43
-  \item {\tt bool_def} theorem, 43
-  \item {\tt boolE} theorem, 43
-  \item {\tt box_equals} theorem, 65, 67
-  \item {\tt bspec} theorem, 34, 72
-  \item {\tt butlast} constant, 82
+  \item {\tt absdiff_def} theorem, 84
+  \item {\tt add_assoc} theorem, 84
+  \item {\tt add_commute} theorem, 84
+  \item {\tt add_def} theorem, 84
+  \item {\tt add_inverse_diff} theorem, 84
+  \item {\tt add_mp_tac}, \bold{82}
+  \item {\tt add_mult_dist} theorem, 84
+  \item {\tt add_safes}, \bold{66}
+  \item {\tt add_typing} theorem, 84
+  \item {\tt add_unsafes}, \bold{66}
+  \item {\tt addC0} theorem, 84
+  \item {\tt addC_succ} theorem, 84
+  \item {\tt Addsplits}, \bold{22}
+  \item {\tt addsplits}, \bold{22}, 27, 39
+  \item {\tt ALL} symbol, 6, 8, 15, 16, 60
+  \item {\tt All} constant, 6, 60
+  \item {\tt All_def} theorem, 10
+  \item {\tt all_dupE} theorem, 12
+  \item {\tt allE} theorem, 12
+  \item {\tt allI} theorem, 12
+  \item {\tt allL} theorem, 62, 66
+  \item {\tt allL_thin} theorem, 63
+  \item {\tt allR} theorem, 62
+  \item {\tt and_def} theorem, 10
+  \item {\tt arg_cong} theorem, 11
+  \item {\tt Arith} theory, 26, 83
+  \item assumptions
+    \subitem in {\CTT}, 72, 82
 
   \indexspace
 
-  \item {\tt case} constant, 44
-  \item {\tt case} symbol, 63, 80, 81, 93
-  \item {\tt case_def} theorem, 44
-  \item {\tt case_Inl} theorem, 44
-  \item {\tt case_Inr} theorem, 44
-  \item {\tt case_tac}, \bold{67}
-  \item {\tt CCL} theory, 1
-  \item {\tt ccontr} theorem, 66
-  \item {\tt classical} theorem, 66
-  \item {\tt coinduct} theorem, 45
-  \item {\tt coinductive}, 105--108
-  \item {\tt Collect} constant, 26, 27, 30, 68, 70
-  \item {\tt Collect_def} theorem, 31
-  \item {\tt Collect_mem_eq} theorem, 70, 71
-  \item {\tt Collect_subset} theorem, 37
-  \item {\tt CollectD} theorem, 72, 111
-  \item {\tt CollectD1} theorem, 33, 35
-  \item {\tt CollectD2} theorem, 33, 35
-  \item {\tt CollectE} theorem, 33, 35, 72
-  \item {\tt CollectI} theorem, 35, 72, 111
-  \item {\tt comp_assoc} theorem, 46
-  \item {\tt comp_bij} theorem, 46
-  \item {\tt comp_def} theorem, 46
-  \item {\tt comp_func} theorem, 46
-  \item {\tt comp_func_apply} theorem, 46
-  \item {\tt comp_inj} theorem, 46
-  \item {\tt comp_rls}, \bold{133}
-  \item {\tt comp_surj} theorem, 46
-  \item {\tt comp_type} theorem, 46
-  \item {\tt Compl} constant, 68
-  \item {\tt Compl_def} theorem, 71
-  \item {\tt Compl_disjoint} theorem, 74
-  \item {\tt Compl_Int} theorem, 74
-  \item {\tt Compl_partition} theorem, 74
-  \item {\tt Compl_Un} theorem, 74
-  \item {\tt ComplD} theorem, 73
-  \item {\tt ComplI} theorem, 73
-  \item {\tt concat} constant, 82
-  \item {\tt cond_0} theorem, 43
-  \item {\tt cond_1} theorem, 43
-  \item {\tt cond_def} theorem, 43
-  \item {\tt cong} theorem, 65
-  \item congruence rules, 33
-  \item {\tt conj_cong}, 6, 75
-  \item {\tt conj_impE} theorem, 9, 10
-  \item {\tt conjE} theorem, 9, 65
-  \item {\tt conjI} theorem, 8, 65
-  \item {\tt conjL} theorem, 116
-  \item {\tt conjR} theorem, 116
-  \item {\tt conjunct1} theorem, 8, 65
-  \item {\tt conjunct2} theorem, 8, 65
-  \item {\tt conL} theorem, 117
-  \item {\tt conR} theorem, 117
-  \item {\tt cons} constant, 26, 27
-  \item {\tt cons_def} theorem, 32
-  \item {\tt Cons_iff} theorem, 50
-  \item {\tt consCI} theorem, 36
-  \item {\tt consE} theorem, 36
-  \item {\tt ConsI} theorem, 50
-  \item {\tt consI1} theorem, 36
-  \item {\tt consI2} theorem, 36
-  \item Constructive Type Theory, 125--147
-  \item {\tt contr} constant, 126
-  \item {\tt converse} constant, 26, 40
-  \item {\tt converse_def} theorem, 32
-  \item {\tt could_res}, \bold{119}
-  \item {\tt could_resolve_seq}, \bold{119}
-  \item {\tt CTT} theory, 1, 125
-  \item {\tt Cube} theory, 1
-  \item {\tt cut} theorem, 116
-  \item {\tt cut_facts_tac}, 19
-  \item {\tt cutL_tac}, \bold{118}
-  \item {\tt cutR_tac}, \bold{118}
+  \item {\tt Ball} constant, 14, 16
+  \item {\tt Ball_def} theorem, 17
+  \item {\tt ballE} theorem, 18
+  \item {\tt ballI} theorem, 18
+  \item {\tt basic} theorem, 62
+  \item {\tt basic_defs}, \bold{80}
+  \item {\tt best_tac}, \bold{67}
+  \item {\tt Bex} constant, 14, 16
+  \item {\tt Bex_def} theorem, 17
+  \item {\tt bexCI} theorem, 16, 18
+  \item {\tt bexE} theorem, 18
+  \item {\tt bexI} theorem, 16, 18
+  \item {\textit {bool}} type, 7
+  \item {\tt box_equals} theorem, 11, 13
+  \item {\tt bspec} theorem, 18
+  \item {\tt butlast} constant, 28
 
   \indexspace
 
-  \item {\tt datatype}, 90--98
-  \item {\tt Delsplits}, \bold{76}
-  \item {\tt delsplits}, \bold{76}
-  \item {\tt diff_0_eq_0} theorem, 137
-  \item {\tt Diff_cancel} theorem, 42
-  \item {\tt Diff_contains} theorem, 37
-  \item {\tt Diff_def} theorem, 31
-  \item {\tt diff_def} theorem, 48, 137
-  \item {\tt Diff_disjoint} theorem, 42
-  \item {\tt Diff_Int} theorem, 42
-  \item {\tt Diff_partition} theorem, 42
-  \item {\tt diff_self_eq_0} theorem, 137
-  \item {\tt Diff_subset} theorem, 37
-  \item {\tt diff_succ_succ} theorem, 137
-  \item {\tt diff_typing} theorem, 137
-  \item {\tt Diff_Un} theorem, 42
-  \item {\tt diffC0} theorem, 137
-  \item {\tt DiffD1} theorem, 36
-  \item {\tt DiffD2} theorem, 36
-  \item {\tt DiffE} theorem, 36
-  \item {\tt DiffI} theorem, 36
-  \item {\tt disj_impE} theorem, 9, 10, 14
-  \item {\tt disjCI} theorem, 11, 66
-  \item {\tt disjE} theorem, 8, 65
-  \item {\tt disjI1} theorem, 8, 65
-  \item {\tt disjI2} theorem, 8, 65
-  \item {\tt disjL} theorem, 116
-  \item {\tt disjR} theorem, 116
-  \item {\tt div} symbol, 48, 79, 137
-  \item {\tt div_def} theorem, 48, 137
-  \item {\tt div_geq} theorem, 80
-  \item {\tt div_less} theorem, 80
-  \item {\tt Divides} theory, 80
-  \item {\tt domain} constant, 26, 40
-  \item {\tt domain_def} theorem, 32
-  \item {\tt domain_of_fun} theorem, 40
-  \item {\tt domain_subset} theorem, 39
-  \item {\tt domain_type} theorem, 40
-  \item {\tt domainE} theorem, 39, 40
-  \item {\tt domainI} theorem, 39, 40
-  \item {\tt double_complement} theorem, 42, 74
-  \item {\tt dresolve_tac}, 54
-  \item {\tt drop} constant, 82
-  \item {\tt dropWhile} constant, 82
+  \item {\tt case} symbol, 9, 26, 27, 39
+  \item {\tt case_tac}, \bold{13}
+  \item {\tt CCL} theory, 1
+  \item {\tt ccontr} theorem, 12
+  \item {\tt classical} theorem, 12
+  \item {\tt coinductive}, 51--54
+  \item {\tt Collect} constant, 14, 16
+  \item {\tt Collect_mem_eq} theorem, 16, 17
+  \item {\tt CollectD} theorem, 18, 57
+  \item {\tt CollectE} theorem, 18
+  \item {\tt CollectI} theorem, 18, 57
+  \item {\tt comp_rls}, \bold{80}
+  \item {\tt Compl} constant, 14
+  \item {\tt Compl_def} theorem, 17
+  \item {\tt Compl_disjoint} theorem, 20
+  \item {\tt Compl_Int} theorem, 20
+  \item {\tt Compl_partition} theorem, 20
+  \item {\tt Compl_Un} theorem, 20
+  \item {\tt ComplD} theorem, 19
+  \item {\tt ComplI} theorem, 19
+  \item {\tt concat} constant, 28
+  \item {\tt cong} theorem, 11
+  \item {\tt conj_cong}, 21
+  \item {\tt conjE} theorem, 11
+  \item {\tt conjI} theorem, 11
+  \item {\tt conjL} theorem, 62
+  \item {\tt conjR} theorem, 62
+  \item {\tt conjunct1} theorem, 11
+  \item {\tt conjunct2} theorem, 11
+  \item {\tt conL} theorem, 63
+  \item {\tt conR} theorem, 63
+  \item Constructive Type Theory, 72--94
+  \item {\tt contr} constant, 73
+  \item {\tt could_res}, \bold{65}
+  \item {\tt could_resolve_seq}, \bold{65}
+  \item {\tt CTT} theory, 1, 72
+  \item {\tt Cube} theory, 1
+  \item {\tt cut} theorem, 62
+  \item {\tt cutL_tac}, \bold{64}
+  \item {\tt cutR_tac}, \bold{64}
+
+  \indexspace
+
+  \item {\tt datatype}, 36--44
+  \item {\tt Delsplits}, \bold{22}
+  \item {\tt delsplits}, \bold{22}
+  \item {\tt diff_0_eq_0} theorem, 84
+  \item {\tt diff_def} theorem, 84
+  \item {\tt diff_self_eq_0} theorem, 84
+  \item {\tt diff_succ_succ} theorem, 84
+  \item {\tt diff_typing} theorem, 84
+  \item {\tt diffC0} theorem, 84
+  \item {\tt disjCI} theorem, 12
+  \item {\tt disjE} theorem, 11
+  \item {\tt disjI1} theorem, 11
+  \item {\tt disjI2} theorem, 11
+  \item {\tt disjL} theorem, 62
+  \item {\tt disjR} theorem, 62
+  \item {\tt div} symbol, 25, 84
+  \item {\tt div_def} theorem, 84
+  \item {\tt div_geq} theorem, 26
+  \item {\tt div_less} theorem, 26
+  \item {\tt Divides} theory, 26
+  \item {\tt double_complement} theorem, 20
+  \item {\tt drop} constant, 28
+  \item {\tt dropWhile} constant, 28
 
   \indexspace
 
-  \item {\tt Elem} constant, 126
-  \item {\tt elim_rls}, \bold{133}
-  \item {\tt elimL_rls}, \bold{133}
-  \item {\tt empty_def} theorem, 71
-  \item {\tt empty_pack}, \bold{119}
-  \item {\tt empty_subsetI} theorem, 34
-  \item {\tt emptyE} theorem, 34, 73
-  \item {\tt Eps} constant, 60, 62
-  \item {\tt Eq} constant, 126
-  \item {\tt eq} constant, 126, 131
-  \item {\tt eq_mp_tac}, \bold{10}
-  \item {\tt EqC} theorem, 132
-  \item {\tt EqE} theorem, 132
-  \item {\tt Eqelem} constant, 126
-  \item {\tt EqF} theorem, 132
-  \item {\tt EqFL} theorem, 132
-  \item {\tt EqI} theorem, 132
-  \item {\tt Eqtype} constant, 126
-  \item {\tt equal_tac}, \bold{134}
-  \item {\tt equal_types} theorem, 129
-  \item {\tt equal_typesL} theorem, 129
-  \item {\tt equalityCE} theorem, 70, 72, 111, 112
-  \item {\tt equalityD1} theorem, 34, 72
-  \item {\tt equalityD2} theorem, 34, 72
-  \item {\tt equalityE} theorem, 34, 72
-  \item {\tt equalityI} theorem, 34, 53, 72
-  \item {\tt equals0D} theorem, 34
-  \item {\tt equals0I} theorem, 34
-  \item {\tt eresolve_tac}, 16
-  \item {\tt eta} theorem, 40, 41
-  \item {\tt EX} symbol, 7, 27, 60, 62, 69, 70, 114
-  \item {\tt Ex} constant, 7, 60, 114
-  \item {\tt EX!} symbol, 7, 60
-  \item {\tt Ex1} constant, 7, 60
-  \item {\tt Ex1_def} theorem, 64
-  \item {\tt ex1_def} theorem, 8
-  \item {\tt ex1E} theorem, 9, 66
-  \item {\tt ex1I} theorem, 9, 66
-  \item {\tt Ex_def} theorem, 64
-  \item {\tt ex_impE} theorem, 9
-  \item {\tt exCI} theorem, 11, 15, 66
-  \item {\tt excluded_middle} theorem, 11, 66
-  \item {\tt exE} theorem, 8, 66
-  \item {\tt exhaust_tac}, \bold{94}
-  \item {\tt exI} theorem, 8, 66
-  \item {\tt exL} theorem, 116
-  \item {\tt Exp} theory, 109
-  \item {\tt exR} theorem, 116, 120, 121
-  \item {\tt exR_thin} theorem, 117, 121, 122
-  \item {\tt ext} theorem, 63, 64
-  \item {\tt extension} theorem, 31
+  \item {\tt Elem} constant, 73
+  \item {\tt elim_rls}, \bold{80}
+  \item {\tt elimL_rls}, \bold{80}
+  \item {\tt empty_def} theorem, 17
+  \item {\tt empty_pack}, \bold{66}
+  \item {\tt emptyE} theorem, 19
+  \item {\tt Eps} constant, 6, 8
+  \item {\tt Eq} constant, 73
+  \item {\tt eq} constant, 73, 78
+  \item {\tt EqC} theorem, 79
+  \item {\tt EqE} theorem, 79
+  \item {\tt Eqelem} constant, 73
+  \item {\tt EqF} theorem, 79
+  \item {\tt EqFL} theorem, 79
+  \item {\tt EqI} theorem, 79
+  \item {\tt Eqtype} constant, 73
+  \item {\tt equal_tac}, \bold{81}
+  \item {\tt equal_types} theorem, 76
+  \item {\tt equal_typesL} theorem, 76
+  \item {\tt equalityCE} theorem, 16, 18, 57, 58
+  \item {\tt equalityD1} theorem, 18
+  \item {\tt equalityD2} theorem, 18
+  \item {\tt equalityE} theorem, 18
+  \item {\tt equalityI} theorem, 18
+  \item {\tt EX} symbol, 6, 8, 15, 16, 60
+  \item {\tt Ex} constant, 6, 60
+  \item {\tt EX!} symbol, 6
+  \item {\tt Ex1} constant, 6
+  \item {\tt Ex1_def} theorem, 10
+  \item {\tt ex1E} theorem, 12
+  \item {\tt ex1I} theorem, 12
+  \item {\tt Ex_def} theorem, 10
+  \item {\tt exCI} theorem, 12
+  \item {\tt excluded_middle} theorem, 12
+  \item {\tt exE} theorem, 12
+  \item {\tt exhaust_tac}, \bold{40}
+  \item {\tt exI} theorem, 12
+  \item {\tt exL} theorem, 62
+  \item {\tt Exp} theory, 55
+  \item {\tt exR} theorem, 62, 66, 68
+  \item {\tt exR_thin} theorem, 63, 68, 69
+  \item {\tt ext} theorem, 9, 10
 
   \indexspace
 
-  \item {\tt F} constant, 126
-  \item {\tt False} constant, 7, 60, 114
-  \item {\tt False_def} theorem, 64
-  \item {\tt FalseE} theorem, 8, 65
-  \item {\tt FalseL} theorem, 116
-  \item {\tt fast_tac}, \bold{121}
-  \item {\tt FE} theorem, 132, 136
-  \item {\tt FEL} theorem, 132
-  \item {\tt FF} theorem, 132
-  \item {\tt field} constant, 26
-  \item {\tt field_def} theorem, 32
-  \item {\tt field_subset} theorem, 39
-  \item {\tt fieldCI} theorem, 39
-  \item {\tt fieldE} theorem, 39
-  \item {\tt fieldI1} theorem, 39
-  \item {\tt fieldI2} theorem, 39
-  \item {\tt filseq_resolve_tac}, \bold{119}
-  \item {\tt filt_resolve_tac}, 119, 134
-  \item {\tt filter} constant, 82
-  \item {\tt Fin.consI} theorem, 49
-  \item {\tt Fin.emptyI} theorem, 49
-  \item {\tt Fin_induct} theorem, 49
-  \item {\tt Fin_mono} theorem, 49
-  \item {\tt Fin_subset} theorem, 49
-  \item {\tt Fin_UnI} theorem, 49
-  \item {\tt Fin_UnionI} theorem, 49
-  \item first-order logic, 5--23
-  \item {\tt Fixedpt} theory, 43
-  \item {\tt flat} constant, 50
-  \item {\tt flat_def} theorem, 50
-  \item flex-flex constraints, 115
-  \item {\tt FOL} theory, 1, 5, 11, 135
-  \item {\tt FOL_cs}, \bold{11}
-  \item {\tt FOL_ss}, \bold{6}
-  \item {\tt foldl} constant, 82
-  \item {\tt form_rls}, \bold{133}
-  \item {\tt formL_rls}, \bold{133}
-  \item {\tt forms_of_seq}, \bold{118}
-  \item {\tt foundation} theorem, 31
-  \item {\tt fst} constant, 26, 33, 77, 126, 131
-  \item {\tt fst_conv} theorem, 38, 77
-  \item {\tt fst_def} theorem, 32, 131
-  \item {\tt Fun} theory, 75
-  \item {\textit {fun}} type, 61
-  \item {\tt fun_cong} theorem, 65
-  \item {\tt fun_disjoint_apply1} theorem, 41, 57
-  \item {\tt fun_disjoint_apply2} theorem, 41
-  \item {\tt fun_disjoint_Un} theorem, 41, 58
-  \item {\tt fun_empty} theorem, 41
-  \item {\tt fun_extension} theorem, 40, 41
-  \item {\tt fun_is_rel} theorem, 40
-  \item {\tt fun_single} theorem, 41
+  \item {\tt F} constant, 73
+  \item {\tt False} constant, 6, 60
+  \item {\tt False_def} theorem, 10
+  \item {\tt FalseE} theorem, 11
+  \item {\tt FalseL} theorem, 62
+  \item {\tt fast_tac}, \bold{67}
+  \item {\tt FE} theorem, 79, 83
+  \item {\tt FEL} theorem, 79
+  \item {\tt FF} theorem, 79
+  \item {\tt filseq_resolve_tac}, \bold{65}
+  \item {\tt filt_resolve_tac}, 65, 81
+  \item {\tt filter} constant, 28
+  \item flex-flex constraints, 61
+  \item {\tt FOL} theory, 82
+  \item {\tt foldl} constant, 28
+  \item {\tt form_rls}, \bold{80}
+  \item {\tt formL_rls}, \bold{80}
+  \item {\tt forms_of_seq}, \bold{64}
+  \item {\tt fst} constant, 23, 73, 78
+  \item {\tt fst_conv} theorem, 23
+  \item {\tt fst_def} theorem, 78
+  \item {\tt Fun} theory, 21
+  \item {\textit {fun}} type, 7
+  \item {\tt fun_cong} theorem, 11
   \item function applications
-    \subitem in \CTT, 128
-    \subitem in \ZF, 26
+    \subitem in \CTT, 75
 
   \indexspace
 
-  \item {\tt gfp_def} theorem, 45
-  \item {\tt gfp_least} theorem, 45
-  \item {\tt gfp_mono} theorem, 45
-  \item {\tt gfp_subset} theorem, 45
-  \item {\tt gfp_Tarski} theorem, 45
-  \item {\tt gfp_upperbound} theorem, 45
-  \item {\tt Goalw}, 18, 19
-
-  \indexspace
-
-  \item {\tt hd} constant, 82
-  \item higher-order logic, 59--112
-  \item {\tt HOL} theory, 1, 59
-  \item {\sc hol} system, 59, 62
-  \item {\tt HOL_basic_ss}, \bold{75}
-  \item {\tt HOL_cs}, \bold{76}
-  \item {\tt HOL_quantifiers}, \bold{62}, 70
-  \item {\tt HOL_ss}, \bold{75}
+  \item {\tt hd} constant, 28
+  \item higher-order logic, 5--58
+  \item {\tt HOL} theory, 1, 5
+  \item {\sc hol} system, 5, 8
+  \item {\tt HOL_basic_ss}, \bold{21}
+  \item {\tt HOL_cs}, \bold{22}
+  \item {\tt HOL_quantifiers}, \bold{8}, 16
+  \item {\tt HOL_ss}, \bold{21}
   \item {\tt HOLCF} theory, 1
-  \item {\tt hyp_rew_tac}, \bold{135}
-  \item {\tt hyp_subst_tac}, 6, 75
+  \item {\tt hyp_rew_tac}, \bold{82}
+  \item {\tt hyp_subst_tac}, 21
 
   \indexspace
 
-  \item {\textit {i}} type, 25, 125
-  \item {\tt id} constant, 46
-  \item {\tt id_def} theorem, 46
-  \item {\tt If} constant, 60
-  \item {\tt if} constant, 26
-  \item {\tt if_def} theorem, 18, 31, 64
-  \item {\tt if_not_P} theorem, 36, 66
-  \item {\tt if_P} theorem, 36, 66
-  \item {\tt ifE} theorem, 19
-  \item {\tt iff} theorem, 63, 64
-  \item {\tt iff_def} theorem, 8, 116
-  \item {\tt iff_impE} theorem, 9
-  \item {\tt iffCE} theorem, 11, 66, 70
-  \item {\tt iffD1} theorem, 9, 65
-  \item {\tt iffD2} theorem, 9, 65
-  \item {\tt iffE} theorem, 9, 65
-  \item {\tt iffI} theorem, 9, 19, 65
-  \item {\tt iffL} theorem, 117, 123
-  \item {\tt iffR} theorem, 117
-  \item {\tt ifI} theorem, 19
-  \item {\tt IFOL} theory, 5
-  \item {\tt IFOL_ss}, \bold{6}
-  \item {\tt image_def} theorem, 32, 71
-  \item {\tt imageE} theorem, 39, 73
-  \item {\tt imageI} theorem, 39, 73
-  \item {\tt imp_impE} theorem, 9, 14
-  \item {\tt impCE} theorem, 11, 66
-  \item {\tt impE} theorem, 9, 10, 65
-  \item {\tt impI} theorem, 8, 63
-  \item {\tt impL} theorem, 116
-  \item {\tt impR} theorem, 116
-  \item {\tt in} symbol, 28, 61
-  \item {\textit {ind}} type, 78
-  \item {\tt induct} theorem, 45
-  \item {\tt induct_tac}, 80, \bold{94}
-  \item {\tt inductive}, 105--108
-  \item {\tt Inf} constant, 26, 30
-  \item {\tt infinity} theorem, 32
-  \item {\tt inj} constant, 46, 75
-  \item {\tt inj_converse_inj} theorem, 46
-  \item {\tt inj_def} theorem, 46, 75
-  \item {\tt inj_Inl} theorem, 79
-  \item {\tt inj_Inr} theorem, 79
-  \item {\tt inj_on} constant, 75
-  \item {\tt inj_on_def} theorem, 75
-  \item {\tt inj_Suc} theorem, 79
-  \item {\tt Inl} constant, 44, 79
-  \item {\tt inl} constant, 126, 131, 141
-  \item {\tt Inl_def} theorem, 44
-  \item {\tt Inl_inject} theorem, 44
-  \item {\tt Inl_neq_Inr} theorem, 44
-  \item {\tt Inl_not_Inr} theorem, 79
-  \item {\tt Inr} constant, 44, 79
-  \item {\tt inr} constant, 126, 131
-  \item {\tt Inr_def} theorem, 44
-  \item {\tt Inr_inject} theorem, 44
-  \item {\tt insert} constant, 68
-  \item {\tt insert_def} theorem, 71
-  \item {\tt insertE} theorem, 73
-  \item {\tt insertI1} theorem, 73
-  \item {\tt insertI2} theorem, 73
-  \item {\tt INT} symbol, 27, 29, 68--70
-  \item {\tt Int} symbol, 26, 68
-  \item {\tt Int_absorb} theorem, 42, 74
-  \item {\tt Int_assoc} theorem, 42, 74
-  \item {\tt Int_commute} theorem, 42, 74
-  \item {\tt INT_D} theorem, 73
-  \item {\tt Int_def} theorem, 31, 71
-  \item {\tt INT_E} theorem, 35, 73
-  \item {\tt Int_greatest} theorem, 37, 53, 55, 74
-  \item {\tt INT_I} theorem, 35, 73
-  \item {\tt Int_Inter_image} theorem, 74
-  \item {\tt Int_lower1} theorem, 37, 54, 74
-  \item {\tt Int_lower2} theorem, 37, 54, 74
-  \item {\tt Int_Un_distrib} theorem, 42, 74
-  \item {\tt Int_Union} theorem, 74
-  \item {\tt Int_Union_RepFun} theorem, 42
-  \item {\tt IntD1} theorem, 36, 73
-  \item {\tt IntD2} theorem, 36, 73
-  \item {\tt IntE} theorem, 36, 54, 73
-  \item {\tt INTER} constant, 68
-  \item {\tt Inter} constant, 26, 68
-  \item {\tt INTER1} constant, 68
-  \item {\tt INTER1_def} theorem, 71
-  \item {\tt INTER_def} theorem, 71
-  \item {\tt Inter_def} theorem, 31, 71
-  \item {\tt Inter_greatest} theorem, 37, 74
-  \item {\tt Inter_lower} theorem, 37, 74
-  \item {\tt Inter_Un_distrib} theorem, 42, 74
-  \item {\tt InterD} theorem, 35, 73
-  \item {\tt InterE} theorem, 35, 73
-  \item {\tt InterI} theorem, 33, 35, 73
-  \item {\tt IntI} theorem, 36, 73
-  \item {\tt IntPr.best_tac}, \bold{11}
-  \item {\tt IntPr.fast_tac}, \bold{10}, 13
-  \item {\tt IntPr.inst_step_tac}, \bold{10}
-  \item {\tt IntPr.safe_step_tac}, \bold{10}
-  \item {\tt IntPr.safe_tac}, \bold{10}
-  \item {\tt IntPr.step_tac}, \bold{10}
-  \item {\tt intr_rls}, \bold{133}
-  \item {\tt intr_tac}, \bold{134}, 143, 144
-  \item {\tt intrL_rls}, \bold{133}
-  \item {\tt inv} constant, 75
-  \item {\tt inv_def} theorem, 75
+  \item {\textit {i}} type, 72
+  \item {\tt If} constant, 6
+  \item {\tt if_def} theorem, 10
+  \item {\tt if_not_P} theorem, 12
+  \item {\tt if_P} theorem, 12
+  \item {\tt iff} theorem, 9, 10
+  \item {\tt iff_def} theorem, 62
+  \item {\tt iffCE} theorem, 12, 16
+  \item {\tt iffD1} theorem, 11
+  \item {\tt iffD2} theorem, 11
+  \item {\tt iffE} theorem, 11
+  \item {\tt iffI} theorem, 11
+  \item {\tt iffL} theorem, 63, 70
+  \item {\tt iffR} theorem, 63
+  \item {\tt ILL} theory, 1
+  \item {\tt image_def} theorem, 17
+  \item {\tt imageE} theorem, 19
+  \item {\tt imageI} theorem, 19
+  \item {\tt impCE} theorem, 12
+  \item {\tt impE} theorem, 11
+  \item {\tt impI} theorem, 9
+  \item {\tt impL} theorem, 62
+  \item {\tt impR} theorem, 62
+  \item {\tt in} symbol, 7
+  \item {\textit {ind}} type, 24
+  \item {\tt induct_tac}, 26, \bold{40}
+  \item {\tt inductive}, 51--54
+  \item {\tt inj} constant, 21
+  \item {\tt inj_def} theorem, 21
+  \item {\tt inj_Inl} theorem, 25
+  \item {\tt inj_Inr} theorem, 25
+  \item {\tt inj_on} constant, 21
+  \item {\tt inj_on_def} theorem, 21
+  \item {\tt inj_Suc} theorem, 25
+  \item {\tt Inl} constant, 25
+  \item {\tt inl} constant, 73, 78, 88
+  \item {\tt Inl_not_Inr} theorem, 25
+  \item {\tt Inr} constant, 25
+  \item {\tt inr} constant, 73, 78
+  \item {\tt insert} constant, 14
+  \item {\tt insert_def} theorem, 17
+  \item {\tt insertE} theorem, 19
+  \item {\tt insertI1} theorem, 19
+  \item {\tt insertI2} theorem, 19
+  \item {\tt INT} symbol, 14--16
+  \item {\tt Int} symbol, 14
+  \item {\tt Int_absorb} theorem, 20
+  \item {\tt Int_assoc} theorem, 20
+  \item {\tt Int_commute} theorem, 20
+  \item {\tt INT_D} theorem, 19
+  \item {\tt Int_def} theorem, 17
+  \item {\tt INT_E} theorem, 19
+  \item {\tt Int_greatest} theorem, 20
+  \item {\tt INT_I} theorem, 19
+  \item {\tt Int_Inter_image} theorem, 20
+  \item {\tt Int_lower1} theorem, 20
+  \item {\tt Int_lower2} theorem, 20
+  \item {\tt Int_Un_distrib} theorem, 20
+  \item {\tt Int_Union} theorem, 20
+  \item {\tt IntD1} theorem, 19
+  \item {\tt IntD2} theorem, 19
+  \item {\tt IntE} theorem, 19
+  \item {\tt INTER} constant, 14
+  \item {\tt Inter} constant, 14
+  \item {\tt INTER1} constant, 14
+  \item {\tt INTER1_def} theorem, 17
+  \item {\tt INTER_def} theorem, 17
+  \item {\tt Inter_def} theorem, 17
+  \item {\tt Inter_greatest} theorem, 20
+  \item {\tt Inter_lower} theorem, 20
+  \item {\tt Inter_Un_distrib} theorem, 20
+  \item {\tt InterD} theorem, 19
+  \item {\tt InterE} theorem, 19
+  \item {\tt InterI} theorem, 19
+  \item {\tt IntI} theorem, 19
+  \item {\tt intr_rls}, \bold{80}
+  \item {\tt intr_tac}, \bold{81}, 90, 91
+  \item {\tt intrL_rls}, \bold{80}
+  \item {\tt inv} constant, 21
+  \item {\tt inv_def} theorem, 21
 
   \indexspace
 
-  \item {\tt lam} symbol, 27, 29, 128
-  \item {\tt lam_def} theorem, 32
-  \item {\tt lam_type} theorem, 40
-  \item {\tt Lambda} constant, 26, 30
-  \item {\tt lambda} constant, 126, 128
+  \item {\tt lam} symbol, 75
+  \item {\tt lambda} constant, 73, 75
   \item $\lambda$-abstractions
-    \subitem in \CTT, 128
-    \subitem in \ZF, 27
-  \item {\tt lamE} theorem, 40, 41
-  \item {\tt lamI} theorem, 40, 41
-  \item {\tt last} constant, 82
+    \subitem in \CTT, 75
+  \item {\tt last} constant, 28
   \item {\tt LCF} theory, 1
-  \item {\tt le_cs}, \bold{24}
-  \item {\tt LEAST} constant, 61, 62, 80
-  \item {\tt Least} constant, 60
-  \item {\tt Least_def} theorem, 64
-  \item {\tt left_comp_id} theorem, 46
-  \item {\tt left_comp_inverse} theorem, 46
-  \item {\tt left_inverse} theorem, 46
-  \item {\tt length} constant, 50, 82
-  \item {\tt length_def} theorem, 50
-  \item {\tt less_induct} theorem, 81
-  \item {\tt Let} constant, 25, 26, 60, 63
-  \item {\tt let} symbol, 28, 61, 63
-  \item {\tt Let_def} theorem, 25, 31, 63, 64
-  \item {\tt LFilter} theory, 109
-  \item {\tt lfp_def} theorem, 45
-  \item {\tt lfp_greatest} theorem, 45
-  \item {\tt lfp_lowerbound} theorem, 45
-  \item {\tt lfp_mono} theorem, 45
-  \item {\tt lfp_subset} theorem, 45
-  \item {\tt lfp_Tarski} theorem, 45
-  \item {\tt List} theory, 81, 82
-  \item {\textit{list}} type, 81
-  \item {\tt list} constant, 50
-  \item {\tt List.induct} theorem, 50
-  \item {\tt list_case} constant, 50
-  \item {\tt list_mono} theorem, 50
-  \item {\tt list_rec} constant, 50
-  \item {\tt list_rec_Cons} theorem, 50
-  \item {\tt list_rec_def} theorem, 50
-  \item {\tt list_rec_Nil} theorem, 50
-  \item {\tt LK} theory, 1, 113, 117
-  \item {\tt LK_dup_pack}, \bold{120}, 121
-  \item {\tt LK_pack}, \bold{120}
-  \item {\tt LList} theory, 109
-  \item {\tt logic} class, 5
+  \item {\tt LEAST} constant, 7, 8, 26
+  \item {\tt Least} constant, 6
+  \item {\tt Least_def} theorem, 10
+  \item {\tt length} constant, 28
+  \item {\tt less_induct} theorem, 27
+  \item {\tt Let} constant, 6, 9
+  \item {\tt let} symbol, 7, 9
+  \item {\tt Let_def} theorem, 9, 10
+  \item {\tt LFilter} theory, 55
+  \item {\tt List} theory, 27, 28
+  \item {\textit{list}} type, 27
+  \item {\tt LK} theory, 1, 59, 63
+  \item {\tt LK_dup_pack}, \bold{66}, 67
+  \item {\tt LK_pack}, \bold{66}
+  \item {\tt LList} theory, 55
 
   \indexspace
 
-  \item {\tt map} constant, 50, 82
-  \item {\tt map_app_distrib} theorem, 50
-  \item {\tt map_compose} theorem, 50
-  \item {\tt map_def} theorem, 50
-  \item {\tt map_flat} theorem, 50
-  \item {\tt map_ident} theorem, 50
-  \item {\tt map_type} theorem, 50
-  \item {\tt max} constant, 61, 80
-  \item {\tt mem} symbol, 82
-  \item {\tt mem_asym} theorem, 36, 37
-  \item {\tt mem_Collect_eq} theorem, 70, 71
-  \item {\tt mem_irrefl} theorem, 36
-  \item {\tt min} constant, 61, 80
-  \item {\tt minus} class, 61
-  \item {\tt mod} symbol, 48, 79, 137
-  \item {\tt mod_def} theorem, 48, 137
-  \item {\tt mod_geq} theorem, 80
-  \item {\tt mod_less} theorem, 80
-  \item {\tt mod_quo_equality} theorem, 48
+  \item {\tt map} constant, 28
+  \item {\tt max} constant, 7, 26
+  \item {\tt mem} symbol, 28
+  \item {\tt mem_Collect_eq} theorem, 16, 17
+  \item {\tt min} constant, 7, 26
+  \item {\tt minus} class, 7
+  \item {\tt mod} symbol, 25, 84
+  \item {\tt mod_def} theorem, 84
+  \item {\tt mod_geq} theorem, 26
+  \item {\tt mod_less} theorem, 26
   \item {\tt Modal} theory, 1
-  \item {\tt mono} constant, 61
-  \item {\tt mp} theorem, 8, 63
-  \item {\tt mp_tac}, \bold{10}, \bold{135}
-  \item {\tt mult_0} theorem, 48
-  \item {\tt mult_assoc} theorem, 48, 137
-  \item {\tt mult_commute} theorem, 48, 137
-  \item {\tt mult_def} theorem, 48, 137
-  \item {\tt mult_succ} theorem, 48
-  \item {\tt mult_type} theorem, 48
-  \item {\tt mult_typing} theorem, 137
-  \item {\tt multC0} theorem, 137
-  \item {\tt multC_succ} theorem, 137
-  \item {\tt mutual_induct_tac}, \bold{94}
+  \item {\tt mono} constant, 7
+  \item {\tt mp} theorem, 9
+  \item {\tt mp_tac}, \bold{82}
+  \item {\tt mult_assoc} theorem, 84
+  \item {\tt mult_commute} theorem, 84
+  \item {\tt mult_def} theorem, 84
+  \item {\tt mult_typing} theorem, 84
+  \item {\tt multC0} theorem, 84
+  \item {\tt multC_succ} theorem, 84
+  \item {\tt mutual_induct_tac}, \bold{40}
+
+  \indexspace
+
+  \item {\tt N} constant, 73
+  \item {\tt n_not_Suc_n} theorem, 25
+  \item {\tt Nat} theory, 26
+  \item {\textit {nat}} type, 25, 26
+  \item {\textit{nat}} type, 24--27
+  \item {\tt nat_induct} theorem, 25
+  \item {\tt nat_rec} constant, 26
+  \item {\tt NatDef} theory, 24
+  \item {\tt NC0} theorem, 77
+  \item {\tt NC_succ} theorem, 77
+  \item {\tt NE} theorem, 76, 77, 85
+  \item {\tt NEL} theorem, 77
+  \item {\tt NF} theorem, 77, 86
+  \item {\tt NI0} theorem, 77
+  \item {\tt NI_succ} theorem, 77
+  \item {\tt NI_succL} theorem, 77
+  \item {\tt NIO} theorem, 85
+  \item {\tt Not} constant, 6, 60
+  \item {\tt not_def} theorem, 10
+  \item {\tt not_sym} theorem, 11
+  \item {\tt notE} theorem, 11
+  \item {\tt notI} theorem, 11
+  \item {\tt notL} theorem, 62
+  \item {\tt notnotD} theorem, 12
+  \item {\tt notR} theorem, 62
+  \item {\tt null} constant, 28
 
   \indexspace
 
-  \item {\tt N} constant, 126
-  \item {\tt n_not_Suc_n} theorem, 79
-  \item {\tt Nat} theory, 47, 80
-  \item {\textit {nat}} type, 79, 80
-  \item {\textit{nat}} type, 78--81
-  \item {\tt nat} constant, 48
-  \item {\tt nat_0I} theorem, 48
-  \item {\tt nat_case} constant, 48
-  \item {\tt nat_case_0} theorem, 48
-  \item {\tt nat_case_def} theorem, 48
-  \item {\tt nat_case_succ} theorem, 48
-  \item {\tt nat_def} theorem, 48
-  \item {\tt nat_induct} theorem, 48, 79
-  \item {\tt nat_rec} constant, 80
-  \item {\tt nat_succI} theorem, 48
-  \item {\tt NatDef} theory, 78
-  \item {\tt NC0} theorem, 130
-  \item {\tt NC_succ} theorem, 130
-  \item {\tt NE} theorem, 129, 130, 138
-  \item {\tt NEL} theorem, 130
-  \item {\tt NF} theorem, 130, 139
-  \item {\tt NI0} theorem, 130
-  \item {\tt NI_succ} theorem, 130
-  \item {\tt NI_succL} theorem, 130
-  \item {\tt Nil_Cons_iff} theorem, 50
-  \item {\tt NilI} theorem, 50
-  \item {\tt NIO} theorem, 138
-  \item {\tt Not} constant, 7, 60, 114
-  \item {\tt not_def} theorem, 8, 43, 64
-  \item {\tt not_impE} theorem, 9
-  \item {\tt not_sym} theorem, 65
-  \item {\tt notE} theorem, 9, 10, 65
-  \item {\tt notI} theorem, 9, 65
-  \item {\tt notL} theorem, 116
-  \item {\tt notnotD} theorem, 11, 66
-  \item {\tt notR} theorem, 116
-  \item {\tt null} constant, 82
-
-  \indexspace
-
-  \item {\tt O} symbol, 46
-  \item {\textit {o}} type, 5, 113
-  \item {\tt o} symbol, 60, 71
-  \item {\tt o_def} theorem, 64
-  \item {\tt of} symbol, 63
-  \item {\tt or_def} theorem, 43, 64
-  \item {\tt Ord} theory, 61
-  \item {\tt ord} class, 61, 62, 80
-  \item {\tt order} class, 61, 80
+  \item {\textit {o}} type, 59
+  \item {\tt o} symbol, 6, 17
+  \item {\tt o_def} theorem, 10
+  \item {\tt of} symbol, 9
+  \item {\tt or_def} theorem, 10
+  \item {\tt Ord} theory, 7
+  \item {\tt ord} class, 7, 8, 26
+  \item {\tt order} class, 7, 26
 
   \indexspace
 
-  \item {\tt pack} ML type, 119
-  \item {\tt Pair} constant, 26, 27, 77
-  \item {\tt pair} constant, 126
-  \item {\tt Pair_def} theorem, 32
-  \item {\tt Pair_eq} theorem, 77
-  \item {\tt Pair_inject} theorem, 38, 77
-  \item {\tt Pair_inject1} theorem, 38
-  \item {\tt Pair_inject2} theorem, 38
-  \item {\tt Pair_neq_0} theorem, 38
-  \item {\tt PairE} theorem, 77
-  \item {\tt pairing} theorem, 35
-  \item {\tt pc_tac}, \bold{121}, \bold{136}, 142, 143
-  \item {\tt Perm} theory, 43
-  \item {\tt Pi} constant, 26, 29, 41
-  \item {\tt Pi_def} theorem, 32
-  \item {\tt Pi_type} theorem, 40, 41
-  \item {\tt plus} class, 61
-  \item {\tt PlusC_inl} theorem, 132
-  \item {\tt PlusC_inr} theorem, 132
-  \item {\tt PlusE} theorem, 132, 136, 140
-  \item {\tt PlusEL} theorem, 132
-  \item {\tt PlusF} theorem, 132
-  \item {\tt PlusFL} theorem, 132
-  \item {\tt PlusI_inl} theorem, 132, 141
-  \item {\tt PlusI_inlL} theorem, 132
-  \item {\tt PlusI_inr} theorem, 132
-  \item {\tt PlusI_inrL} theorem, 132
-  \item {\tt Pow} constant, 26, 68
-  \item {\tt Pow_def} theorem, 71
-  \item {\tt Pow_iff} theorem, 31
-  \item {\tt Pow_mono} theorem, 53
-  \item {\tt PowD} theorem, 34, 54, 73
-  \item {\tt PowI} theorem, 34, 54, 73
-  \item {\tt primrec}, 99--102
-  \item {\tt primrec} symbol, 80
-  \item {\tt PrimReplace} constant, 26, 30
-  \item priorities, 2
-  \item {\tt PROD} symbol, 27, 29, 127, 128
-  \item {\tt Prod} constant, 126
-  \item {\tt Prod} theory, 77
-  \item {\tt ProdC} theorem, 130, 146
-  \item {\tt ProdC2} theorem, 130
-  \item {\tt ProdE} theorem, 130, 143, 145, 147
-  \item {\tt ProdEL} theorem, 130
-  \item {\tt ProdF} theorem, 130
-  \item {\tt ProdFL} theorem, 130
-  \item {\tt ProdI} theorem, 130, 136, 138
-  \item {\tt ProdIL} theorem, 130
-  \item {\tt prop_cs}, \bold{11}, \bold{76}
-  \item {\tt prop_pack}, \bold{119}
+  \item {\tt pack} ML type, 65
+  \item {\tt Pair} constant, 23
+  \item {\tt pair} constant, 73
+  \item {\tt Pair_eq} theorem, 23
+  \item {\tt Pair_inject} theorem, 23
+  \item {\tt PairE} theorem, 23
+  \item {\tt pc_tac}, \bold{67}, \bold{83}, 89, 90
+  \item {\tt plus} class, 7
+  \item {\tt PlusC_inl} theorem, 79
+  \item {\tt PlusC_inr} theorem, 79
+  \item {\tt PlusE} theorem, 79, 83, 87
+  \item {\tt PlusEL} theorem, 79
+  \item {\tt PlusF} theorem, 79
+  \item {\tt PlusFL} theorem, 79
+  \item {\tt PlusI_inl} theorem, 79, 88
+  \item {\tt PlusI_inlL} theorem, 79
+  \item {\tt PlusI_inr} theorem, 79
+  \item {\tt PlusI_inrL} theorem, 79
+  \item {\tt Pow} constant, 14
+  \item {\tt Pow_def} theorem, 17
+  \item {\tt PowD} theorem, 19
+  \item {\tt PowI} theorem, 19
+  \item {\tt primrec}, 45--48
+  \item {\tt primrec} symbol, 26
+  \item priorities, 3
+  \item {\tt PROD} symbol, 74, 75
+  \item {\tt Prod} constant, 73
+  \item {\tt Prod} theory, 23
+  \item {\tt ProdC} theorem, 77, 93
+  \item {\tt ProdC2} theorem, 77
+  \item {\tt ProdE} theorem, 77, 90, 92, 94
+  \item {\tt ProdEL} theorem, 77
+  \item {\tt ProdF} theorem, 77
+  \item {\tt ProdFL} theorem, 77
+  \item {\tt ProdI} theorem, 77, 83, 85
+  \item {\tt ProdIL} theorem, 77
+  \item {\tt prop_cs}, \bold{22}
+  \item {\tt prop_pack}, \bold{66}
 
   \indexspace
 
-  \item {\tt qcase_def} theorem, 44
-  \item {\tt qconverse} constant, 43
-  \item {\tt qconverse_def} theorem, 44
-  \item {\tt qed_spec_mp}, 97
-  \item {\tt qfsplit_def} theorem, 44
-  \item {\tt QInl_def} theorem, 44
-  \item {\tt QInr_def} theorem, 44
-  \item {\tt QPair} theory, 43
-  \item {\tt QPair_def} theorem, 44
-  \item {\tt QSigma} constant, 43
-  \item {\tt QSigma_def} theorem, 44
-  \item {\tt qsplit} constant, 43
-  \item {\tt qsplit_def} theorem, 44
-  \item {\tt qsum_def} theorem, 44
-  \item {\tt QUniv} theory, 47
+  \item {\tt qed_spec_mp}, 43
+
+  \indexspace
+
+  \item {\tt range} constant, 14, 56
+  \item {\tt range_def} theorem, 17
+  \item {\tt rangeE} theorem, 19, 57
+  \item {\tt rangeI} theorem, 19
+  \item {\tt rec} constant, 73, 76
+  \item {\tt recdef}, 48--51
+  \item {\tt record}, 33
+  \item {\tt record_split_tac}, 35, 36
+  \item recursion
+    \subitem general, 48--51
+    \subitem primitive, 45--48
+  \item recursive functions, \see{recursion}{44}
+  \item {\tt red_if_equal} theorem, 76
+  \item {\tt Reduce} constant, 73, 76, 82
+  \item {\tt refl} theorem, 9, 62
+  \item {\tt refl_elem} theorem, 76, 80
+  \item {\tt refl_red} theorem, 76
+  \item {\tt refl_type} theorem, 76, 80
+  \item {\tt REPEAT_FIRST}, 81
+  \item {\tt repeat_goal_tac}, \bold{67}
+  \item {\tt replace_type} theorem, 80, 92
+  \item {\tt reresolve_tac}, \bold{67}
+  \item {\tt res_inst_tac}, 8
+  \item {\tt rev} constant, 28
+  \item {\tt rew_tac}, \bold{82}
+  \item {\tt RL}, 87
+  \item {\tt RS}, 92, 94
 
   \indexspace
 
-  \item {\tt range} constant, 26, 68, 110
-  \item {\tt range_def} theorem, 32, 71
-  \item {\tt range_of_fun} theorem, 40, 41
-  \item {\tt range_subset} theorem, 39
-  \item {\tt range_type} theorem, 40
-  \item {\tt rangeE} theorem, 39, 73, 111
-  \item {\tt rangeI} theorem, 39, 73
-  \item {\tt rank} constant, 49
-  \item {\tt rank_ss}, \bold{24}
-  \item {\tt rec} constant, 48, 126, 129
-  \item {\tt rec_0} theorem, 48
-  \item {\tt rec_def} theorem, 48
-  \item {\tt rec_succ} theorem, 48
-  \item {\tt recdef}, 102--105
-  \item {\tt record}, 87
-  \item {\tt record_split_tac}, 89, 90
-  \item recursion
-    \subitem general, 102--105
-    \subitem primitive, 99--102
-  \item recursive functions, \see{recursion}{98}
-  \item {\tt red_if_equal} theorem, 129
-  \item {\tt Reduce} constant, 126, 129, 135
-  \item {\tt refl} theorem, 8, 63, 116
-  \item {\tt refl_elem} theorem, 129, 133
-  \item {\tt refl_red} theorem, 129
-  \item {\tt refl_type} theorem, 129, 133
-  \item {\tt REPEAT_FIRST}, 134
-  \item {\tt repeat_goal_tac}, \bold{121}
-  \item {\tt RepFun} constant, 26, 29, 30, 33
-  \item {\tt RepFun_def} theorem, 31
-  \item {\tt RepFunE} theorem, 35
-  \item {\tt RepFunI} theorem, 35
-  \item {\tt Replace} constant, 26, 29, 30, 33
-  \item {\tt Replace_def} theorem, 31
-  \item {\tt replace_type} theorem, 133, 145
-  \item {\tt ReplaceE} theorem, 35
-  \item {\tt ReplaceI} theorem, 35
-  \item {\tt replacement} theorem, 31
-  \item {\tt reresolve_tac}, \bold{121}
-  \item {\tt res_inst_tac}, 62
-  \item {\tt restrict} constant, 26, 33
-  \item {\tt restrict} theorem, 40
-  \item {\tt restrict_bij} theorem, 46
-  \item {\tt restrict_def} theorem, 32
-  \item {\tt restrict_type} theorem, 40
-  \item {\tt rev} constant, 50, 82
-  \item {\tt rev_def} theorem, 50
-  \item {\tt rew_tac}, 19, \bold{135}
-  \item {\tt rewrite_rule}, 19
-  \item {\tt right_comp_id} theorem, 46
-  \item {\tt right_comp_inverse} theorem, 46
-  \item {\tt right_inverse} theorem, 46
-  \item {\tt RL}, 140
-  \item {\tt RS}, 145, 147
+  \item {\tt safe_goal_tac}, \bold{67}
+  \item {\tt safe_tac}, \bold{83}
+  \item {\tt safestep_tac}, \bold{83}
+  \item search
+    \subitem best-first, 58
+  \item {\tt select_equality} theorem, 10, 12
+  \item {\tt selectI} theorem, 9, 10
+  \item {\tt Seqof} constant, 60
+  \item sequent calculus, 59--71
+  \item {\tt Set} theory, 13, 16
+  \item {\tt set} constant, 28
+  \item {\tt set} type, 13
+  \item {\tt set_current_thy}, 58
+  \item {\tt set_diff_def} theorem, 17
+  \item {\tt show_sorts}, 8
+  \item {\tt show_types}, 8
+  \item {\tt Sigma} constant, 23
+  \item {\tt Sigma_def} theorem, 23
+  \item {\tt SigmaE} theorem, 23
+  \item {\tt SigmaI} theorem, 23
+  \item simplification
+    \subitem of conjunctions, 21
+  \item {\tt size} constant, 39
+  \item {\tt snd} constant, 23, 73, 78
+  \item {\tt snd_conv} theorem, 23
+  \item {\tt snd_def} theorem, 78
+  \item {\tt sobj} type, 63
+  \item {\tt spec} theorem, 12
+  \item {\tt split} constant, 23, 73, 87
+  \item {\tt split} theorem, 23
+  \item {\tt split_all_tac}, \bold{24}
+  \item {\tt split_if} theorem, 12, 22
+  \item {\tt split_list_case} theorem, 27
+  \item {\tt split_split} theorem, 23
+  \item {\tt split_sum_case} theorem, 25
+  \item {\tt ssubst} theorem, 11, 13
+  \item {\tt stac}, \bold{21}
+  \item {\tt step_tac}, \bold{67}, \bold{83}
+  \item {\tt strip_tac}, \bold{13}
+  \item {\tt subset_def} theorem, 17
+  \item {\tt subset_refl} theorem, 18
+  \item {\tt subset_trans} theorem, 18
+  \item {\tt subsetCE} theorem, 16, 18
+  \item {\tt subsetD} theorem, 16, 18
+  \item {\tt subsetI} theorem, 18
+  \item {\tt subst} theorem, 9
+  \item {\tt subst_elem} theorem, 76
+  \item {\tt subst_elemL} theorem, 76
+  \item {\tt subst_eqtyparg} theorem, 80, 92
+  \item {\tt subst_prodE} theorem, 78, 80
+  \item {\tt subst_type} theorem, 76
+  \item {\tt subst_typeL} theorem, 76
+  \item {\tt Suc} constant, 25
+  \item {\tt Suc_not_Zero} theorem, 25
+  \item {\tt succ} constant, 73
+  \item {\tt SUM} symbol, 74, 75
+  \item {\tt Sum} constant, 73
+  \item {\tt Sum} theory, 24
+  \item {\tt sum_case} constant, 25
+  \item {\tt sum_case_Inl} theorem, 25
+  \item {\tt sum_case_Inr} theorem, 25
+  \item {\tt SumC} theorem, 78
+  \item {\tt SumE} theorem, 78, 83, 87
+  \item {\tt sumE} theorem, 25
+  \item {\tt SumE_fst} theorem, 78, 80, 92, 93
+  \item {\tt SumE_snd} theorem, 78, 80, 94
+  \item {\tt SumEL} theorem, 78
+  \item {\tt SumF} theorem, 78
+  \item {\tt SumFL} theorem, 78
+  \item {\tt SumI} theorem, 78, 88
+  \item {\tt SumIL} theorem, 78
+  \item {\tt SumIL2} theorem, 80
+  \item {\tt surj} constant, 17, 21
+  \item {\tt surj_def} theorem, 21
+  \item {\tt surjective_pairing} theorem, 23
+  \item {\tt surjective_sum} theorem, 25
+  \item {\tt swap} theorem, 12
+  \item {\tt swap_res_tac}, 57
+  \item {\tt sym} theorem, 11, 62
+  \item {\tt sym_elem} theorem, 76
+  \item {\tt sym_type} theorem, 76
+  \item {\tt symL} theorem, 63
 
   \indexspace
 
-  \item {\tt safe_goal_tac}, \bold{121}
-  \item {\tt safe_tac}, \bold{136}
-  \item {\tt safestep_tac}, \bold{136}
-  \item search
-    \subitem best-first, 112
-  \item {\tt select_equality} theorem, 64, 66
-  \item {\tt selectI} theorem, 63, 64
-  \item {\tt separation} theorem, 35
-  \item {\tt Seqof} constant, 114
-  \item sequent calculus, 113--124
-  \item {\tt Set} theory, 67, 70
-  \item {\tt set} constant, 82
-  \item {\tt set} type, 67
-  \item set theory, 24--58
-  \item {\tt set_current_thy}, 112
-  \item {\tt set_diff_def} theorem, 71
-  \item {\tt show_sorts}, 62
-  \item {\tt show_types}, 62
-  \item {\tt Sigma} constant, 26, 29, 30, 38, 77
-  \item {\tt Sigma_def} theorem, 32, 77
-  \item {\tt SigmaE} theorem, 38, 77
-  \item {\tt SigmaE2} theorem, 38
-  \item {\tt SigmaI} theorem, 38, 77
-  \item simplification
-    \subitem of conjunctions, 6, 75
-  \item {\tt singletonE} theorem, 36
-  \item {\tt singletonI} theorem, 36
-  \item {\tt size} constant, 93
-  \item {\tt snd} constant, 26, 33, 77, 126, 131
-  \item {\tt snd_conv} theorem, 38, 77
-  \item {\tt snd_def} theorem, 32, 131
-  \item {\tt sobj} type, 117
-  \item {\tt spec} theorem, 8, 66
-  \item {\tt split} constant, 26, 33, 77, 126, 140
-  \item {\tt split} theorem, 38, 77
-  \item {\tt split_all_tac}, \bold{78}
-  \item {\tt split_def} theorem, 32
-  \item {\tt split_if} theorem, 66, 76
-  \item {\tt split_list_case} theorem, 81
-  \item {\tt split_split} theorem, 77
-  \item {\tt split_sum_case} theorem, 79
-  \item {\tt ssubst} theorem, 9, 65, 67
-  \item {\tt stac}, \bold{75}
-  \item {\tt Step_tac}, 22
-  \item {\tt step_tac}, 23, \bold{121}, \bold{136}
-  \item {\tt strip_tac}, \bold{67}
-  \item {\tt subset_def} theorem, 31, 71
-  \item {\tt subset_refl} theorem, 34, 72
-  \item {\tt subset_trans} theorem, 34, 72
-  \item {\tt subsetCE} theorem, 34, 70, 72
-  \item {\tt subsetD} theorem, 34, 56, 70, 72
-  \item {\tt subsetI} theorem, 34, 54, 55, 72
-  \item {\tt subst} theorem, 8, 63
-  \item {\tt subst_elem} theorem, 129
-  \item {\tt subst_elemL} theorem, 129
-  \item {\tt subst_eqtyparg} theorem, 133, 145
-  \item {\tt subst_prodE} theorem, 131, 133
-  \item {\tt subst_type} theorem, 129
-  \item {\tt subst_typeL} theorem, 129
-  \item {\tt Suc} constant, 79
-  \item {\tt Suc_not_Zero} theorem, 79
-  \item {\tt succ} constant, 26, 30, 126
-  \item {\tt succ_def} theorem, 32
-  \item {\tt succ_inject} theorem, 36
-  \item {\tt succ_neq_0} theorem, 36
-  \item {\tt succCI} theorem, 36
-  \item {\tt succE} theorem, 36
-  \item {\tt succI1} theorem, 36
-  \item {\tt succI2} theorem, 36
-  \item {\tt SUM} symbol, 27, 29, 127, 128
-  \item {\tt Sum} constant, 126
-  \item {\tt Sum} theory, 43, 78
-  \item {\tt sum_case} constant, 79
-  \item {\tt sum_case_Inl} theorem, 79
-  \item {\tt sum_case_Inr} theorem, 79
-  \item {\tt sum_def} theorem, 44
-  \item {\tt sum_InlI} theorem, 44
-  \item {\tt sum_InrI} theorem, 44
-  \item {\tt SUM_Int_distrib1} theorem, 42
-  \item {\tt SUM_Int_distrib2} theorem, 42
-  \item {\tt SUM_Un_distrib1} theorem, 42
-  \item {\tt SUM_Un_distrib2} theorem, 42
-  \item {\tt SumC} theorem, 131
-  \item {\tt SumE} theorem, 131, 136, 140
-  \item {\tt sumE} theorem, 79
-  \item {\tt sumE2} theorem, 44
-  \item {\tt SumE_fst} theorem, 131, 133, 145, 146
-  \item {\tt SumE_snd} theorem, 131, 133, 147
-  \item {\tt SumEL} theorem, 131
-  \item {\tt SumF} theorem, 131
-  \item {\tt SumFL} theorem, 131
-  \item {\tt SumI} theorem, 131, 141
-  \item {\tt SumIL} theorem, 131
-  \item {\tt SumIL2} theorem, 133
-  \item {\tt surj} constant, 46, 71, 75
-  \item {\tt surj_def} theorem, 46, 75
-  \item {\tt surjective_pairing} theorem, 77
-  \item {\tt surjective_sum} theorem, 79
-  \item {\tt swap} theorem, 11, 66
-  \item {\tt swap_res_tac}, 16, 111
-  \item {\tt sym} theorem, 9, 65, 116
-  \item {\tt sym_elem} theorem, 129
-  \item {\tt sym_type} theorem, 129
-  \item {\tt symL} theorem, 117
+  \item {\tt T} constant, 73
+  \item {\textit {t}} type, 72
+  \item {\tt take} constant, 28
+  \item {\tt takeWhile} constant, 28
+  \item {\tt TC} theorem, 79
+  \item {\tt TE} theorem, 79
+  \item {\tt TEL} theorem, 79
+  \item {\tt term} class, 7, 59
+  \item {\tt test_assume_tac}, \bold{81}
+  \item {\tt TF} theorem, 79
+  \item {\tt THE} symbol, 60
+  \item {\tt The} constant, 60
+  \item {\tt The} theorem, 62
+  \item {\tt thinL} theorem, 62
+  \item {\tt thinR} theorem, 62
+  \item {\tt TI} theorem, 79
+  \item {\tt times} class, 7
+  \item {\tt tl} constant, 28
+  \item tracing
+    \subitem of unification, 8
+  \item {\tt trans} theorem, 11, 62
+  \item {\tt trans_elem} theorem, 76
+  \item {\tt trans_red} theorem, 76
+  \item {\tt trans_tac}, 27
+  \item {\tt trans_type} theorem, 76
+  \item {\tt True} constant, 6, 60
+  \item {\tt True_def} theorem, 10, 62
+  \item {\tt True_or_False} theorem, 9, 10
+  \item {\tt TrueI} theorem, 11
+  \item {\tt Trueprop} constant, 6, 60
+  \item {\tt TrueR} theorem, 63
+  \item {\tt tt} constant, 73
+  \item {\tt Type} constant, 73
+  \item type definition, \bold{30}
+  \item {\tt typechk_tac}, \bold{81}, 86, 89, 93, 94
+  \item {\tt typedef}, 27
 
   \indexspace
 
-  \item {\tt T} constant, 126
-  \item {\textit {t}} type, 125
-  \item {\tt take} constant, 82
-  \item {\tt takeWhile} constant, 82
-  \item {\tt TC} theorem, 132
-  \item {\tt TE} theorem, 132
-  \item {\tt TEL} theorem, 132
-  \item {\tt term} class, 5, 61, 113
-  \item {\tt test_assume_tac}, \bold{134}
-  \item {\tt TF} theorem, 132
-  \item {\tt THE} symbol, 27, 29, 37, 114
-  \item {\tt The} constant, 26, 29, 30, 114
-  \item {\tt The} theorem, 116
-  \item {\tt the_def} theorem, 31
-  \item {\tt the_equality} theorem, 36, 37
-  \item {\tt theI} theorem, 36, 37
-  \item {\tt thinL} theorem, 116
-  \item {\tt thinR} theorem, 116
-  \item {\tt TI} theorem, 132
-  \item {\tt times} class, 61
-  \item {\tt tl} constant, 82
-  \item tracing
-    \subitem of unification, 62
-  \item {\tt trans} theorem, 9, 65, 116
-  \item {\tt trans_elem} theorem, 129
-  \item {\tt trans_red} theorem, 129
-  \item {\tt trans_tac}, 81
-  \item {\tt trans_type} theorem, 129
-  \item {\tt True} constant, 7, 60, 114
-  \item {\tt True_def} theorem, 8, 64, 116
-  \item {\tt True_or_False} theorem, 63, 64
-  \item {\tt TrueI} theorem, 9, 65
-  \item {\tt Trueprop} constant, 7, 60, 114
-  \item {\tt TrueR} theorem, 117
-  \item {\tt tt} constant, 126
-  \item {\tt Type} constant, 126
-  \item type definition, \bold{84}
-  \item {\tt typechk_tac}, \bold{134}, 139, 142, 146, 147
-  \item {\tt typedef}, 81
+  \item {\tt UN} symbol, 14--16
+  \item {\tt Un} symbol, 14
+  \item {\tt Un1} theorem, 16
+  \item {\tt Un2} theorem, 16
+  \item {\tt Un_absorb} theorem, 20
+  \item {\tt Un_assoc} theorem, 20
+  \item {\tt Un_commute} theorem, 20
+  \item {\tt Un_def} theorem, 17
+  \item {\tt UN_E} theorem, 19
+  \item {\tt UN_I} theorem, 19
+  \item {\tt Un_Int_distrib} theorem, 20
+  \item {\tt Un_Inter} theorem, 20
+  \item {\tt Un_least} theorem, 20
+  \item {\tt Un_Union_image} theorem, 20
+  \item {\tt Un_upper1} theorem, 20
+  \item {\tt Un_upper2} theorem, 20
+  \item {\tt UnCI} theorem, 16, 19
+  \item {\tt UnE} theorem, 19
+  \item {\tt UnI1} theorem, 19
+  \item {\tt UnI2} theorem, 19
+  \item unification
+    \subitem incompleteness of, 8
+  \item {\tt Unify.trace_types}, 8
+  \item {\tt UNION} constant, 14
+  \item {\tt Union} constant, 14
+  \item {\tt UNION1} constant, 14
+  \item {\tt UNION1_def} theorem, 17
+  \item {\tt UNION_def} theorem, 17
+  \item {\tt Union_def} theorem, 17
+  \item {\tt Union_least} theorem, 20
+  \item {\tt Union_Un_distrib} theorem, 20
+  \item {\tt Union_upper} theorem, 20
+  \item {\tt UnionE} theorem, 19
+  \item {\tt UnionI} theorem, 19
+  \item {\tt unit_eq} theorem, 24
 
   \indexspace
 
-  \item {\tt UN} symbol, 27, 29, 68--70
-  \item {\tt Un} symbol, 26, 68
-  \item {\tt Un1} theorem, 70
-  \item {\tt Un2} theorem, 70
-  \item {\tt Un_absorb} theorem, 42, 74
-  \item {\tt Un_assoc} theorem, 42, 74
-  \item {\tt Un_commute} theorem, 42, 74
-  \item {\tt Un_def} theorem, 31, 71
-  \item {\tt UN_E} theorem, 35, 73
-  \item {\tt UN_I} theorem, 35, 73
-  \item {\tt Un_Int_distrib} theorem, 42, 74
-  \item {\tt Un_Inter} theorem, 74
-  \item {\tt Un_Inter_RepFun} theorem, 42
-  \item {\tt Un_least} theorem, 37, 74
-  \item {\tt Un_Union_image} theorem, 74
-  \item {\tt Un_upper1} theorem, 37, 74
-  \item {\tt Un_upper2} theorem, 37, 74
-  \item {\tt UnCI} theorem, 36, 37, 70, 73
-  \item {\tt UnE} theorem, 36, 73
-  \item {\tt UnI1} theorem, 36, 37, 57, 73
-  \item {\tt UnI2} theorem, 36, 37, 73
-  \item unification
-    \subitem incompleteness of, 62
-  \item {\tt Unify.trace_types}, 62
-  \item {\tt UNION} constant, 68
-  \item {\tt Union} constant, 26, 68
-  \item {\tt UNION1} constant, 68
-  \item {\tt UNION1_def} theorem, 71
-  \item {\tt UNION_def} theorem, 71
-  \item {\tt Union_def} theorem, 71
-  \item {\tt Union_iff} theorem, 31
-  \item {\tt Union_least} theorem, 37, 74
-  \item {\tt Union_Un_distrib} theorem, 42, 74
-  \item {\tt Union_upper} theorem, 37, 74
-  \item {\tt UnionE} theorem, 35, 56, 73
-  \item {\tt UnionI} theorem, 35, 56, 73
-  \item {\tt unit_eq} theorem, 78
-  \item {\tt Univ} theory, 47
-  \item {\tt Upair} constant, 25, 26, 30
-  \item {\tt Upair_def} theorem, 31
-  \item {\tt UpairE} theorem, 35
-  \item {\tt UpairI1} theorem, 35
-  \item {\tt UpairI2} theorem, 35
+  \item {\tt when} constant, 73, 78, 87
 
   \indexspace
 
-  \item {\tt vimage_def} theorem, 32
-  \item {\tt vimageE} theorem, 39
-  \item {\tt vimageI} theorem, 39
-
-  \indexspace
-
-  \item {\tt when} constant, 126, 131, 140
-
-  \indexspace
-
-  \item {\tt xor_def} theorem, 43
-
-  \indexspace
-
-  \item {\tt zero_ne_succ} theorem, 129, 130
-  \item {\tt ZF} theory, 1, 24, 59
-  \item {\tt ZF_cs}, \bold{24}
-  \item {\tt ZF_ss}, \bold{24}
+  \item {\tt zero_ne_succ} theorem, 76, 77
+  \item {\tt ZF} theory, 5
 
 \end{theindex}
--- a/doc-src/Logics/logics.tex	Fri Jan 08 13:20:59 1999 +0100
+++ b/doc-src/Logics/logics.tex	Fri Jan 08 14:02:04 1999 +0100
@@ -1,3 +1,4 @@
+%% $Id$
 \documentclass[12pt]{report}
 \usepackage{graphicx,a4,latexsym,../pdfsetup}
 
@@ -8,15 +9,13 @@
 \input{../extra.sty}
 \makeatother
 
-%% $Id$
 %%%STILL NEEDS MODAL, LCF
-%%%\includeonly{ZF}
 %%% to index derived rls:  ^\([a-zA-Z0-9][a-zA-Z0-9_]*\)        \\tdx{\1}  
 %%% to index rulenames:   ^ *(\([a-zA-Z0-9][a-zA-Z0-9_]*\),     \\tdx{\1}  
 %%% to index constants:   \\tt \([a-zA-Z0-9][a-zA-Z0-9_]*\)     \\cdx{\1}  
 %%% to deverbify:         \\verb|\([^|]*\)|     \\ttindex{\1}  
 %% run    ../sedindex logics    to prepare index file
-\title{\includegraphics[scale=0.5]{isabelle.eps} \\[4ex] Isabelle's Object-Logics}
+\title{\includegraphics[scale=0.5]{isabelle.eps} \\[4ex] Isabelle's Logics}
 
 \author{{\em Lawrence C. Paulson}\\
         Computer Laboratory \\ University of Cambridge \\
@@ -25,9 +24,8 @@
 \thanks{Tobias Nipkow revised and extended
     the chapter on \HOL.  Markus Wenzel made numerous improvements.
     Philippe de Groote wrote the
-    first version of the logic~\LK{} and contributed to~\ZF{}.  Tobias
-    Nipkow developed~\HOL{}, \LCF{} and~\Cube{}.  Philippe No\"el and
-    Martin Coen made many contributions to~\ZF{}.  Martin Coen
+    first version of the logic~\LK{}.  Tobias
+    Nipkow developed~\HOL{}, \LCF{} and~\Cube{}.  Martin Coen
     developed~\Modal{} with assistance from Rajeev Gor\'e.  The research has 
     been funded by the EPSRC (grants GR/G53279, GR/H40570, GR/K57381,
     GR/K77051) and by ESPRIT project 6453: Types.}
@@ -39,9 +37,6 @@
 
 \makeindex
 
-%%\newenvironment{example}{\begin{Example}\rm}{\end{Example}}
-%%\newtheorem{Example}{Example}[chapter]
-
 \underscoreoff
 
 \setcounter{secnumdepth}{2} \setcounter{tocdepth}{2}  %% {secnumdepth}{2}???
@@ -53,9 +48,8 @@
 \begin{document}
 \maketitle 
 \pagenumbering{roman} \tableofcontents \clearfirst
-\include{intro}
-\include{FOL}
-\include{ZF}
+\include{preface}
+\include{syntax}
 \include{HOL}
 \include{LK}
 %%\include{Modal}