more standard document preparation within session context;
authorwenzelm
Mon, 27 Aug 2012 21:46:00 +0200
changeset 48946 a9b8344f5196
parent 48945 b5758f5a469c
child 48947 7eee8b2d2099
more standard document preparation within session context;
doc-src/ROOT
doc-src/ZF/FOL.tex
doc-src/ZF/ZF.tex
doc-src/ZF/document/FOL.tex
doc-src/ZF/document/ZF.tex
doc-src/ZF/document/build
doc-src/ZF/document/logics.sty
doc-src/ZF/document/root.tex
doc-src/ZF/logics-ZF.tex
doc-src/ZF/logics.sty
--- a/doc-src/ROOT	Mon Aug 27 21:37:34 2012 +0200
+++ b/doc-src/ROOT	Mon Aug 27 21:46:00 2012 +0200
@@ -136,6 +136,21 @@
     "document/build"
     "document/root.tex"
 
+session "Logics-ZF" (doc) in "ZF" = ZF +
+  options [document_variants = "logics-ZF", print_mode = "brackets"]
+  theories
+    IFOL_examples
+    FOL_examples
+    ZF_examples
+    If
+  files
+    "../ttbox.sty"
+    "../proof.sty"
+    "../manual.bib"
+    "../Logics/document/syntax.tex"
+    "document/build"
+    "document/root.tex"
+
 session Main (doc) in "Main" = HOL +
   options [document_variants = "main"]
   theories Main_Doc
@@ -261,13 +276,3 @@
   options [browser_info = false, document = false, print_mode = "brackets"]
   theories ToyList
 
-session "ZF-examples" (doc) in "ZF" = ZF +
-  options [browser_info = false, document = false,
-    document_dump = document, document_dump_mode = "tex",
-    print_mode = "brackets"]
-  theories
-    IFOL_examples
-    FOL_examples
-    ZF_examples
-    If
-
--- a/doc-src/ZF/FOL.tex	Mon Aug 27 21:37:34 2012 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,855 +0,0 @@
-%!TEX root = logics-ZF.tex
-\chapter{First-Order Logic}
-\index{first-order logic|(}
-
-Isabelle implements Gentzen's natural deduction systems {\sc nj} and {\sc
-  nk}.  Intuitionistic first-order logic is defined first, as theory
-\thydx{IFOL}.  Classical logic, theory \thydx{FOL}, is
-obtained by adding the double negation rule.  Basic proof procedures are
-provided.  The intuitionistic prover works with derived rules to simplify
-implications in the assumptions.  Classical~\texttt{FOL} employs Isabelle's
-classical reasoner, which simulates a sequent calculus.
-
-\section{Syntax and rules of inference}
-The logic is many-sorted, using Isabelle's type classes.  The class of
-first-order terms is called \cldx{term} and is a subclass of \isa{logic}.
-No types of individuals are provided, but extensions can define types such
-as \isa{nat::term} and type constructors such as \isa{list::(term)term}
-(see the examples directory, \texttt{FOL/ex}).  Below, the type variable
-$\alpha$ ranges over class \isa{term}; the equality symbol and quantifiers
-are polymorphic (many-sorted).  The type of formulae is~\tydx{o}, which
-belongs to class~\cldx{logic}.  Figure~\ref{fol-syntax} gives the syntax.
-Note that $a$\verb|~=|$b$ is translated to $\neg(a=b)$.
-
-Figure~\ref{fol-rules} shows the inference rules with their names.
-Negation is defined in the usual way for intuitionistic logic; $\neg P$
-abbreviates $P\imp\bot$.  The biconditional~($\bimp$) is defined through
-$\conj$ and~$\imp$; introduction and elimination rules are derived for it.
-
-The unique existence quantifier, $\exists!x.P(x)$, is defined in terms
-of~$\exists$ and~$\forall$.  An Isabelle binder, it admits nested
-quantifications.  For instance, $\exists!x\;y.P(x,y)$ abbreviates
-$\exists!x. \exists!y.P(x,y)$; note that this does not mean that there
-exists a unique pair $(x,y)$ satisfying~$P(x,y)$.
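-As a small illustration of this point (an example added here, not part of
-the formal development), take $P(x,y)$ over the natural numbers to be
-$(x=0\conj y=0)\disj x=1$.  Then $\exists!x\;y.P(x,y)$ holds, witnessed by
-$x=0$ with its unique $y=0$, even though every pair $(1,y)$ also
-satisfies~$P$, so there is no unique pair.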
-
-Some intuitionistic derived rules are shown in
-Fig.\ts\ref{fol-int-derived}, again with their names.  These include
-rules for the defined symbols $\neg$, $\bimp$ and $\exists!$.  Natural
-deduction typically involves a combination of forward and backward
-reasoning, particularly with the destruction rules $(\conj E)$,
-$({\imp}E)$, and~$(\forall E)$.  Isabelle's backward style handles these
-rules badly, so sequent-style rules are derived to eliminate conjunctions,
-implications, and universal quantifiers.  Used with elim-resolution,
-\tdx{allE} eliminates a universal quantifier while \tdx{all_dupE}
-re-inserts the quantified formula for later use.  The rules
-\isa{conj\_impE}, etc., support the intuitionistic proof procedure
-(see~\S\ref{fol-int-prover}).
-
-See the on-line theory library for complete listings of the rules and
-derived rules.
-
-\begin{figure} 
-\begin{center}
-\begin{tabular}{rrr} 
-  \it name      &\it meta-type  & \it description \\ 
-  \cdx{Trueprop}& $o\To prop$           & coercion to $prop$\\
-  \cdx{Not}     & $o\To o$              & negation ($\neg$) \\
-  \cdx{True}    & $o$                   & tautology ($\top$) \\
-  \cdx{False}   & $o$                   & absurdity ($\bot$)
-\end{tabular}
-\end{center}
-\subcaption{Constants}
-
-\begin{center}
-\begin{tabular}{llrrr} 
-  \it symbol &\it name     &\it meta-type & \it priority & \it description \\
-  \sdx{ALL}  & \cdx{All}  & $(\alpha\To o)\To o$ & 10 & 
-        universal quantifier ($\forall$) \\
-  \sdx{EX}   & \cdx{Ex}   & $(\alpha\To o)\To o$ & 10 & 
-        existential quantifier ($\exists$) \\
-  \isa{EX!}  & \cdx{Ex1}  & $(\alpha\To o)\To o$ & 10 & 
-        unique existence ($\exists!$)
-\end{tabular}
-\index{*"E"X"! symbol}
-\end{center}
-\subcaption{Binders} 
-
-\begin{center}
-\index{*"= symbol}
-\index{&@{\tt\&} symbol}
-\index{"!@{\tt\char124} symbol} %\char124 is vertical bar. We use ! because | stopped working
-\index{*"-"-"> symbol}
-\index{*"<"-"> symbol}
-\begin{tabular}{rrrr} 
-  \it symbol    & \it meta-type         & \it priority & \it description \\ 
-  \tt =         & $[\alpha,\alpha]\To o$ & Left 50 & equality ($=$) \\
-  \tt \&        & $[o,o]\To o$          & Right 35 & conjunction ($\conj$) \\
-  \tt |         & $[o,o]\To o$          & Right 30 & disjunction ($\disj$) \\
-  \tt -->       & $[o,o]\To o$          & Right 25 & implication ($\imp$) \\
-  \tt <->       & $[o,o]\To o$          & Right 25 & biconditional ($\bimp$) 
-\end{tabular}
-\end{center}
-\subcaption{Infixes}
-
-\dquotes
-\[\begin{array}{rcl}
- formula & = & \hbox{expression of type~$o$} \\
-         & | & term " = " term \quad| \quad term " \ttilde= " term \\
-         & | & "\ttilde\ " formula \\
-         & | & formula " \& " formula \\
-         & | & formula " | " formula \\
-         & | & formula " --> " formula \\
-         & | & formula " <-> " formula \\
-         & | & "ALL~" id~id^* " . " formula \\
-         & | & "EX~~" id~id^* " . " formula \\
-         & | & "EX!~" id~id^* " . " formula
-  \end{array}
-\]
-\subcaption{Grammar}
-\caption{Syntax of \texttt{FOL}} \label{fol-syntax}
-\end{figure}
-
-
-\begin{figure} 
-\begin{ttbox}
-\tdx{refl}        a=a
-\tdx{subst}       [| a=b;  P(a) |] ==> P(b)
-\subcaption{Equality rules}
-
-\tdx{conjI}       [| P;  Q |] ==> P&Q
-\tdx{conjunct1}   P&Q ==> P
-\tdx{conjunct2}   P&Q ==> Q
-
-\tdx{disjI1}      P ==> P|Q
-\tdx{disjI2}      Q ==> P|Q
-\tdx{disjE}       [| P|Q;  P ==> R;  Q ==> R |] ==> R
-
-\tdx{impI}        (P ==> Q) ==> P-->Q
-\tdx{mp}          [| P-->Q;  P |] ==> Q
-
-\tdx{FalseE}      False ==> P
-\subcaption{Propositional rules}
-
-\tdx{allI}        (!!x. P(x))  ==> (ALL x.P(x))
-\tdx{spec}        (ALL x.P(x)) ==> P(x)
-
-\tdx{exI}         P(x) ==> (EX x.P(x))
-\tdx{exE}         [| EX x.P(x);  !!x. P(x) ==> R |] ==> R
-\subcaption{Quantifier rules}
-
-\tdx{True_def}    True        == False-->False
-\tdx{not_def}     ~P          == P-->False
-\tdx{iff_def}     P<->Q       == (P-->Q) & (Q-->P)
-\tdx{ex1_def}     EX! x. P(x) == EX x. P(x) & (ALL y. P(y) --> y=x)
-\subcaption{Definitions}
-\end{ttbox}
-
-\caption{Rules of intuitionistic logic} \label{fol-rules}
-\end{figure}
-
-
-\begin{figure} 
-\begin{ttbox}
-\tdx{sym}       a=b ==> b=a
-\tdx{trans}     [| a=b;  b=c |] ==> a=c
-\tdx{ssubst}    [| b=a;  P(a) |] ==> P(b)
-\subcaption{Derived equality rules}
-
-\tdx{TrueI}     True
-
-\tdx{notI}      (P ==> False) ==> ~P
-\tdx{notE}      [| ~P;  P |] ==> R
-
-\tdx{iffI}      [| P ==> Q;  Q ==> P |] ==> P<->Q
-\tdx{iffE}      [| P <-> Q;  [| P-->Q; Q-->P |] ==> R |] ==> R
-\tdx{iffD1}     [| P <-> Q;  P |] ==> Q            
-\tdx{iffD2}     [| P <-> Q;  Q |] ==> P
-
-\tdx{ex1I}      [| P(a);  !!x. P(x) ==> x=a |]  ==>  EX! x. P(x)
-\tdx{ex1E}      [| EX! x.P(x);  !!x.[| P(x);  ALL y. P(y) --> y=x |] ==> R 
-          |] ==> R
-\subcaption{Derived rules for \(\top\), \(\neg\), \(\bimp\) and \(\exists!\)}
-
-\tdx{conjE}     [| P&Q;  [| P; Q |] ==> R |] ==> R
-\tdx{impE}      [| P-->Q;  P;  Q ==> R |] ==> R
-\tdx{allE}      [| ALL x.P(x);  P(x) ==> R |] ==> R
-\tdx{all_dupE}  [| ALL x.P(x);  [| P(x); ALL x.P(x) |] ==> R |] ==> R
-\subcaption{Sequent-style elimination rules}
-
-\tdx{conj_impE} [| (P&Q)-->S;  P-->(Q-->S) ==> R |] ==> R
-\tdx{disj_impE} [| (P|Q)-->S;  [| P-->S; Q-->S |] ==> R |] ==> R
-\tdx{imp_impE}  [| (P-->Q)-->S;  [| P; Q-->S |] ==> Q;  S ==> R |] ==> R
-\tdx{not_impE}  [| ~P --> S;  P ==> False;  S ==> R |] ==> R
-\tdx{iff_impE}  [| (P<->Q)-->S; [| P; Q-->S |] ==> Q; [| Q; P-->S |] ==> P;
-             S ==> R |] ==> R
-\tdx{all_impE}  [| (ALL x.P(x))-->S;  !!x.P(x);  S ==> R |] ==> R
-\tdx{ex_impE}   [| (EX x.P(x))-->S;  P(a)-->S ==> R |] ==> R
-\end{ttbox}
-\subcaption{Intuitionistic simplification of implication}
-\caption{Derived rules for intuitionistic logic} \label{fol-int-derived}
-\end{figure}
-
-
-\section{Generic packages}
-FOL instantiates most of Isabelle's generic packages.
-\begin{itemize}
-\item 
-It instantiates the simplifier, which is invoked through the method 
-\isa{simp}.  Both equality ($=$) and the biconditional
-($\bimp$) may be used for rewriting.  
-
-\item 
-It instantiates the classical reasoner, which is invoked mainly through the 
-methods \isa{blast} and \isa{auto}.  See~\S\ref{fol-cla-prover}
-for details. 
-%
-%\item FOL provides the tactic \ttindex{hyp_subst_tac}, which substitutes for
-%  an equality throughout a subgoal and its hypotheses.  This tactic uses FOL's
-%  general substitution rule.
-\end{itemize}
-
-\begin{warn}\index{simplification!of conjunctions}%
-  Simplifying $a=b\conj P(a)$ to $a=b\conj P(b)$ is often advantageous.  The
-  left part of a conjunction helps in simplifying the right part.  This effect
-  is not available by default because it can be slow.  It can be obtained by
-  including the theorem \isa{conj_cong}\index{*conj_cong (rule)}%
-  as a congruence rule in
-  simplification, \isa{simp cong:\ conj\_cong}.
-\end{warn}
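-
-For example (a small sketch, not part of the original text), the congruence
-rule lets \isa{simp} use the left conjunct while rewriting the right one:
-\begin{isabelle}
-\isacommand{lemma}\ "a=b\ \&\ P(a)\ <->\ a=b\ \&\ P(b)"\isanewline
-\isacommand{by}\ (simp\ cong:\ conj\_cong)
-\end{isabelle}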
-
-
-\section{Intuitionistic proof procedures} \label{fol-int-prover}
-Implication elimination (the rules~\isa{mp} and~\isa{impE}) poses
-difficulties for automated proof.  In intuitionistic logic, the assumption
-$P\imp Q$ cannot be treated like $\neg P\disj Q$.  Given $P\imp Q$, we may
-use~$Q$ provided we can prove~$P$; the proof of~$P$ may require repeated
-use of $P\imp Q$.  If the proof of~$P$ fails then the whole branch of the
-proof must be abandoned.  Thus intuitionistic propositional logic requires
-backtracking.  
-
-For an elementary example, consider the intuitionistic proof of $Q$ from
-$P\imp Q$ and $(P\imp Q)\imp P$.  The implication $P\imp Q$ is needed
-twice:
-\[ \infer[({\imp}E)]{Q}{P\imp Q &
-       \infer[({\imp}E)]{P}{(P\imp Q)\imp P & P\imp Q}} 
-\]
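-One way to replay this derivation in Isabelle (a sketch using only the
-primitive rules) is to apply \isa{mp} with \isa{rule} rather than
-\isa{erule}, so that the assumption $P\imp Q$ remains available for its
-second use:
-\begin{isabelle}
-\isacommand{lemma}\ "[|\ P-->Q;\ \ (P-->Q)-->P\ |]\ ==>\ Q"\isanewline
-\isacommand{by}\ (rule\ mp,\ assumption,\ rule\ mp,\ assumption,\ assumption)
-\end{isabelle}
-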
-The theorem prover for intuitionistic logic does not use~\isa{impE}.\@
-Instead, it simplifies implications using derived rules
-(Fig.\ts\ref{fol-int-derived}).  It reduces the antecedents of implications
-to atoms and then uses Modus Ponens: from $P\imp Q$ and~$P$ deduce~$Q$.
-The rules \tdx{conj_impE} and \tdx{disj_impE} are 
-straightforward: $(P\conj Q)\imp S$ is equivalent to $P\imp (Q\imp S)$, and
-$(P\disj Q)\imp S$ is equivalent to the conjunction of $P\imp S$ and $Q\imp
-S$.  The other \ldots\isa{\_impE} rules are unsafe; the method requires
-backtracking.  All the rules are derived in the same simple manner.
-
-Dyckhoff has independently discovered similar rules, and (more importantly)
-has demonstrated their completeness for propositional
-logic~\cite{dyckhoff}.  However, the tactics given below are not complete
-for first-order logic because they discard universally quantified
-assumptions after a single use. These are \ML{} functions, and are listed
-below with their \ML{} type:
-\begin{ttbox} 
-mp_tac              : int -> tactic
-eq_mp_tac           : int -> tactic
-IntPr.safe_step_tac : int -> tactic
-IntPr.safe_tac      :        tactic
-IntPr.inst_step_tac : int -> tactic
-IntPr.step_tac      : int -> tactic
-IntPr.fast_tac      : int -> tactic
-IntPr.best_tac      : int -> tactic
-\end{ttbox}
-Most of these belong to the \ML{} structure \texttt{IntPr} and resemble the
-tactics of Isabelle's classical reasoner.  There are no corresponding
-Isar methods, but you can use the 
-\isa{tactic} method to call \ML{} tactics in an Isar script:
-\begin{isabelle}
-\isacommand{apply}\ (tactic\ {* IntPr.fast\_tac 1*})
-\end{isabelle}
-Here is a description of each tactic:
-
-\begin{ttdescription}
-\item[\ttindexbold{mp_tac} {\it i}] 
-attempts to use \tdx{notE} or \tdx{impE} within the assumptions in
-subgoal $i$.  For each assumption of the form $\neg P$ or $P\imp Q$, it
-searches for another assumption unifiable with~$P$.  By
-contradiction with $\neg P$ it can solve the subgoal completely; by Modus
-Ponens it can replace the assumption $P\imp Q$ by $Q$.  The tactic can
-produce multiple outcomes, enumerating all suitable pairs of assumptions.
-
-\item[\ttindexbold{eq_mp_tac} {\it i}] 
-is like \texttt{mp_tac} {\it i}, but may not instantiate unknowns --- thus, it
-is safe.
-
-\item[\ttindexbold{IntPr.safe_step_tac} $i$] performs a safe step on
-subgoal~$i$.  This may include proof by assumption or Modus Ponens (taking
-care not to instantiate unknowns), or \texttt{hyp_subst_tac}. 
-
-\item[\ttindexbold{IntPr.safe_tac}] repeatedly performs safe steps on all 
-subgoals.  It is deterministic, with at most one outcome.
-
-\item[\ttindexbold{IntPr.inst_step_tac} $i$] is like \texttt{safe_step_tac},
-but allows unknowns to be instantiated.
-
-\item[\ttindexbold{IntPr.step_tac} $i$] tries \texttt{safe_tac} or {\tt
-    inst_step_tac}, or applies an unsafe rule.  This is the basic step of
-  the intuitionistic proof procedure.
-
-\item[\ttindexbold{IntPr.fast_tac} $i$] applies \texttt{step_tac}, using
-depth-first search, to solve subgoal~$i$.
-
-\item[\ttindexbold{IntPr.best_tac} $i$] applies \texttt{step_tac}, using
-best-first search (guided by the size of the proof state) to solve subgoal~$i$.
-\end{ttdescription}
-Here are some of the theorems that \texttt{IntPr.fast_tac} proves
-automatically.  The latter three date from {\it Principia Mathematica}
-(*11.53, *11.55, *11.61)~\cite{principia}.
-\begin{ttbox}
-~~P & ~~(P --> Q) --> ~~Q
-(ALL x y. P(x) --> Q(y)) <-> ((EX x. P(x)) --> (ALL y. Q(y)))
-(EX x y. P(x) & Q(x,y)) <-> (EX x. P(x) & (EX y. Q(x,y)))
-(EX y. ALL x. P(x) --> Q(x,y)) --> (ALL x. P(x) --> (EX y. Q(x,y)))
-\end{ttbox}
-
-
-
-\begin{figure} 
-\begin{ttbox}
-\tdx{excluded_middle}    ~P | P
-
-\tdx{disjCI}    (~Q ==> P) ==> P|Q
-\tdx{exCI}      (ALL x. ~P(x) ==> P(a)) ==> EX x.P(x)
-\tdx{impCE}     [| P-->Q; ~P ==> R; Q ==> R |] ==> R
-\tdx{iffCE}     [| P<->Q;  [| P; Q |] ==> R;  [| ~P; ~Q |] ==> R |] ==> R
-\tdx{notnotD}   ~~P ==> P
-\tdx{swap}      ~P ==> (~Q ==> P) ==> Q
-\end{ttbox}
-\caption{Derived rules for classical logic} \label{fol-cla-derived}
-\end{figure}
-
-
-\section{Classical proof procedures} \label{fol-cla-prover}
-The classical theory, \thydx{FOL}, consists of intuitionistic logic plus
-the rule
-$$ \vcenter{\infer{P}{\infer*{P}{[\neg P]}}} \eqno(classical) $$
-\noindent
-Natural deduction in classical logic is not really all that natural.  FOL
-derives classical introduction rules for $\disj$ and~$\exists$, as well as
-classical elimination rules for~$\imp$ and~$\bimp$, and the swap rule (see
-Fig.\ts\ref{fol-cla-derived}).
-
-The classical reasoner is installed.  The most useful methods are
-\isa{blast} (pure classical reasoning) and \isa{auto} (classical reasoning
-combined with simplification), but the full range of
-methods is available, including \isa{clarify},
-\isa{fast}, \isa{best} and \isa{force}. 
- See
-\iflabelundefined{chap:classical}{the {\em Reference Manual\/}}%
-        {Chap.\ts\ref{chap:classical}} 
-and the \emph{Tutorial}~\cite{isa-tutorial}
-for more discussion of classical proof methods.
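-
-For instance (a sketch, not from the original text), \isa{blast} proves
-Peirce's law, which is classically but not intuitionistically valid:
-\begin{isabelle}
-\isacommand{lemma}\ "((P-->Q)-->P)-->P"\isanewline
-\isacommand{by}\ blast
-\end{isabelle}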
-
-
-\section{An intuitionistic example}
-Here is a session similar to one in the book {\em Logic and Computation}
-\cite[pages~222--3]{paulson87}. It illustrates the treatment of
-quantifier reasoning, which differs in Isabelle from that in
-{\sc lcf}-based theorem provers such as {\sc hol}.  
-
-The theory header specifies that we are working in intuitionistic
-logic by designating \isa{IFOL} rather than \isa{FOL} as the parent
-theory:
-\begin{isabelle}
-\isacommand{theory}\ IFOL\_examples\ \isacommand{imports}\ IFOL\isanewline
-\isacommand{begin}
-\end{isabelle}
-The proof begins by entering the goal, then applying the rule $({\imp}I)$.
-\begin{isabelle}
-\isacommand{lemma}\ "(EX\ y.\ ALL\ x.\ Q(x,y))\ -->\ \ (ALL\ x.\ EX\ y.\ Q(x,y))"\isanewline
-\ 1.\ (\isasymexists y.\ \isasymforall x.\ Q(x,\ y))\
-\isasymlongrightarrow \ (\isasymforall x.\ \isasymexists y.\ Q(x,\ y))
-\isanewline
-\isacommand{apply}\ (rule\ impI)\isanewline
-\ 1.\ \isasymexists y.\ \isasymforall x.\ Q(x,\ y)\
-\isasymLongrightarrow \ \isasymforall x.\ \isasymexists y.\ Q(x,\ y)
-\end{isabelle}
-Isabelle's output is shown as it would appear using Proof General.
-In this example, we shall never have more than one subgoal.  Applying
-$({\imp}I)$ replaces~\isa{\isasymlongrightarrow}
-by~\isa{\isasymLongrightarrow}, so that
-\(\ex{y}\all{x}Q(x,y)\) becomes an assumption.  We have the choice of
-$({\exists}E)$ and $({\forall}I)$; let us try the latter.
-\begin{isabelle}
-\isacommand{apply}\ (rule\ allI)\isanewline
-\ 1.\ \isasymAnd x.\ \isasymexists y.\ \isasymforall x.\ Q(x,\ y)\
-\isasymLongrightarrow \ \isasymexists y.\ Q(x,\ y)\hfill\((*)\)
-\end{isabelle}
-Applying $({\forall}I)$ replaces the \isa{\isasymforall
-x} (in ASCII, \isa{ALL~x}) by \isa{\isasymAnd x}
-(or \isa{!!x}), replacing FOL's universal quantifier by Isabelle's 
-meta universal quantifier.  The bound variable is a {\bf parameter} of
-the subgoal.  We now must choose between $({\exists}I)$ and
-$({\exists}E)$.  What happens if the wrong rule is chosen?
-\begin{isabelle}
-\isacommand{apply}\ (rule\ exI)\isanewline
-\ 1.\ \isasymAnd x.\ \isasymexists y.\ \isasymforall x.\ Q(x,\ y)\
-\isasymLongrightarrow \ Q(x,\ ?y2(x))
-\end{isabelle}
-The new subgoal~1 contains the function variable \isa{?y2}.  Instantiating
-\isa{?y2} can replace~\isa{?y2(x)} by a term containing~\isa{x}, even
-though~\isa{x} is a bound variable.  Now we analyse the assumption
-\(\exists y.\forall x. Q(x,y)\) using elimination rules:
-\begin{isabelle}
-\isacommand{apply}\ (erule\ exE)\isanewline
-\ 1.\ \isasymAnd x\ y.\ \isasymforall x.\ Q(x,\ y)\ \isasymLongrightarrow \ Q(x,\ ?y2(x))
-\end{isabelle}
-Applying $(\exists E)$ has produced the parameter \isa{y} and stripped the
-existential quantifier from the assumption.  But the subgoal is unprovable:
-there is no way to unify \isa{?y2(x)} with the bound variable~\isa{y}.
-Using Proof General, we can return to the critical point, marked
-$(*)$ above.  This time we apply $({\exists}E)$:
-\begin{isabelle}
-\isacommand{apply}\ (erule\ exE)\isanewline
-\ 1.\ \isasymAnd x\ y.\ \isasymforall x.\ Q(x,\ y)\
-\isasymLongrightarrow \ \isasymexists y.\ Q(x,\ y)
-\end{isabelle}
-We now have two parameters and no scheme variables.  Applying
-$({\exists}I)$ and $({\forall}E)$ produces two scheme variables, which are
-applied to those parameters.  Parameters should be produced early, as this
-example demonstrates.
-\begin{isabelle}
-\isacommand{apply}\ (rule\ exI)\isanewline
-\ 1.\ \isasymAnd x\ y.\ \isasymforall x.\ Q(x,\ y)\
-\isasymLongrightarrow \ Q(x,\ ?y3(x,\ y))
-\isanewline
-\isacommand{apply}\ (erule\ allE)\isanewline
-\ 1.\ \isasymAnd x\ y.\ Q(?x4(x,\ y),\ y)\ \isasymLongrightarrow \
-Q(x,\ ?y3(x,\ y))
-\end{isabelle}
-The subgoal has variables \isa{?y3} and \isa{?x4} applied to both
-parameters.  The obvious projection functions unify \isa{?x4(x,y)} with~\isa{
-x} and \isa{?y3(x,y)} with~\isa{y}.
-\begin{isabelle}
-\isacommand{apply}\ assumption\isanewline
-No\ subgoals!\isanewline
-\isacommand{done}
-\end{isabelle}
-The theorem was proved in six method invocations, not counting the
-abandoned ones.  But proof checking is tedious, and the \ML{} tactic
-\ttindex{IntPr.fast_tac} can prove the theorem in one step.
-\begin{isabelle}
-\isacommand{lemma}\ "(EX\ y.\ ALL\ x.\ Q(x,y))\ -->\ \ (ALL\ x.\ EX\ y.\ Q(x,y))"\isanewline
-\ 1.\ (\isasymexists y.\ \isasymforall x.\ Q(x,\ y))\
-\isasymlongrightarrow \ (\isasymforall x.\ \isasymexists y.\ Q(x,\ y))
-\isanewline
-\isacommand{by} (tactic \ttlbrace*IntPr.fast_tac 1*\ttrbrace)\isanewline
-No\ subgoals!
-\end{isabelle}
-
-
-\section{An example of intuitionistic negation}
-The following example demonstrates the specialized forms of implication
-elimination.  Even propositional formulae can be difficult to prove from
-the basic rules; the specialized rules help considerably.  
-
-Propositional examples are easy to invent.  As Dummett notes~\cite[page
-28]{dummett}, $\neg P$ is classically provable if and only if it is
-intuitionistically provable;  therefore, $P$ is classically provable if and
-only if $\neg\neg P$ is intuitionistically provable.%
-\footnote{This remark holds only for propositional logic, not if $P$ is
-  allowed to contain quantifiers.}
-%
-Proving $\neg\neg P$ intuitionistically is
-much harder than proving~$P$ classically.
-
-Our example is the double negation of the classical tautology $(P\imp
-Q)\disj (Q\imp P)$.  The first step is to apply the
-\isa{unfold} method, which expands
-negations to implications using the definition $\neg P\equiv P\imp\bot$
-and allows use of the special implication rules.
-\begin{isabelle}
-\isacommand{lemma}\ "\isachartilde \ \isachartilde \ ((P-->Q)\ |\ (Q-->P))"\isanewline
-\ 1.\ \isasymnot \ \isasymnot \ ((P\ \isasymlongrightarrow \ Q)\ \isasymor \ (Q\ \isasymlongrightarrow \ P))
-\isanewline
-\isacommand{apply}\ (unfold\ not\_def)\isanewline
-\ 1.\ ((P\ \isasymlongrightarrow \ Q)\ \isasymor \ (Q\ \isasymlongrightarrow \ P)\ \isasymlongrightarrow \ False)\ \isasymlongrightarrow \ False%
-\end{isabelle}
-The next step is a trivial use of $(\imp I)$.
-\begin{isabelle}
-\isacommand{apply}\ (rule\ impI)\isanewline
-\ 1.\ (P\ \isasymlongrightarrow \ Q)\ \isasymor \ (Q\ \isasymlongrightarrow \ P)\ \isasymlongrightarrow \ False\ \isasymLongrightarrow \ False%
-\end{isabelle}
-By $(\imp E)$ it would suffice to prove $(P\imp Q)\disj (Q\imp P)$, but
-that formula is not a theorem of intuitionistic logic.  Instead, we
-apply the specialized implication rule \tdx{disj_impE}.  It splits the
-assumption into two assumptions, one for each disjunct.
-\begin{isabelle}
-\isacommand{apply}\ (erule\ disj\_impE)\isanewline
-\ 1.\ \isasymlbrakk (P\ \isasymlongrightarrow \ Q)\ \isasymlongrightarrow \ False;\ (Q\ \isasymlongrightarrow \ P)\ \isasymlongrightarrow \ False\isasymrbrakk \ \isasymLongrightarrow \
-False
-\end{isabelle}
-We cannot hope to prove $P\imp Q$ or $Q\imp P$ separately, but
-their negations are inconsistent.  Applying \tdx{imp_impE} breaks down
-the assumption $\neg(P\imp Q)$, asking to show~$Q$ while providing new
-assumptions~$P$ and~$\neg Q$.
-\begin{isabelle}
-\isacommand{apply}\ (erule\ imp\_impE)\isanewline
-\ 1.\ \isasymlbrakk (Q\ \isasymlongrightarrow \ P)\ \isasymlongrightarrow \ False;\ P;\ Q\ \isasymlongrightarrow \ False\isasymrbrakk \ \isasymLongrightarrow \ Q\isanewline
-\ 2.\ \isasymlbrakk (Q\ \isasymlongrightarrow \ P)\ \isasymlongrightarrow \ False;\ False\isasymrbrakk \ \isasymLongrightarrow \
-False
-\end{isabelle}
-Subgoal~2 holds trivially; let us ignore it and continue working on
-subgoal~1.  Thanks to the assumption~$P$, we could prove $Q\imp P$;
-applying \tdx{imp_impE} is simpler.
-\begin{isabelle}
-\ \isacommand{apply}\ (erule\ imp\_impE)\isanewline
-\ 1.\ \isasymlbrakk P;\ Q\ \isasymlongrightarrow \ False;\ Q;\ P\ \isasymlongrightarrow \ False\isasymrbrakk \ \isasymLongrightarrow \ P\isanewline
-\ 2.\ \isasymlbrakk P;\ Q\ \isasymlongrightarrow \ False;\ False\isasymrbrakk \ \isasymLongrightarrow \ Q\isanewline
-\ 3.\ \isasymlbrakk (Q\ \isasymlongrightarrow \ P)\ \isasymlongrightarrow \ False;\ False\isasymrbrakk \ \isasymLongrightarrow \ False%
-\end{isabelle}
-The three subgoals are all trivial.
-\begin{isabelle}
-\isacommand{apply}\ assumption\ \isanewline
-\ 1.\ \isasymlbrakk P;\ Q\ \isasymlongrightarrow \ False;\
-False\isasymrbrakk \ \isasymLongrightarrow \ Q\isanewline
-\ 2.\ \isasymlbrakk (Q\ \isasymlongrightarrow \ P)\
-\isasymlongrightarrow \ False;\ False\isasymrbrakk \
-\isasymLongrightarrow \ False%
-\isanewline
-\isacommand{apply}\ (erule\ FalseE)+\isanewline
-No\ subgoals!\isanewline
-\isacommand{done}
-\end{isabelle}
-This proof is also trivial for the \ML{} tactic \isa{IntPr.fast_tac}.
-
-
-\section{A classical example} \label{fol-cla-example}
-To illustrate classical logic, we shall prove the theorem
-$\ex{y}\all{x}P(y)\imp P(x)$.  Informally, the theorem can be proved as
-follows.  Choose~$y$ such that~$\neg P(y)$, if such exists; otherwise
-$\all{x}P(x)$ is true.  Either way the theorem holds.  First, we must
-work in a theory based on classical logic, the theory \isa{FOL}:
-\begin{isabelle}
-\isacommand{theory}\ FOL\_examples\ \isacommand{imports}\ FOL\isanewline
-\isacommand{begin}
-\end{isabelle}
-
-The formal proof does not conform in any obvious way to the sketch given
-above.  Its key step is its first rule, \tdx{exCI}, a classical
-version of~$(\exists I)$ that allows multiple instantiation of the
-quantifier.
-\begin{isabelle}
-\isacommand{lemma}\ "EX\ y.\ ALL\ x.\ P(y)-->P(x)"\isanewline
-\ 1.\ \isasymexists y.\ \isasymforall x.\ P(y)\ \isasymlongrightarrow \ P(x)
-\isanewline
-\isacommand{apply}\ (rule\ exCI)\isanewline
-\ 1.\ \isasymforall y.\ \isasymnot \ (\isasymforall x.\ P(y)\ \isasymlongrightarrow \ P(x))\ \isasymLongrightarrow \ \isasymforall x.\ P(?a)\ \isasymlongrightarrow \ P(x)
-\end{isabelle}
-We can either exhibit a term \isa{?a} to satisfy the conclusion of
-subgoal~1, or produce a contradiction from the assumption.  The next
-steps are routine.
-\begin{isabelle}
-\isacommand{apply}\ (rule\ allI)\isanewline
-\ 1.\ \isasymAnd x.\ \isasymforall y.\ \isasymnot \ (\isasymforall x.\ P(y)\ \isasymlongrightarrow \ P(x))\ \isasymLongrightarrow \ P(?a)\ \isasymlongrightarrow \ P(x)
-\isanewline
-\isacommand{apply}\ (rule\ impI)\isanewline
-\ 1.\ \isasymAnd x.\ \isasymlbrakk \isasymforall y.\ \isasymnot \ (\isasymforall x.\ P(y)\ \isasymlongrightarrow \ P(x));\ P(?a)\isasymrbrakk \ \isasymLongrightarrow \ P(x)
-\end{isabelle}
-By the duality between $\exists$ and~$\forall$, applying~$(\forall E)$
-is equivalent to applying~$(\exists I)$ again.
-\begin{isabelle}
-\isacommand{apply}\ (erule\ allE)\isanewline
-\ 1.\ \isasymAnd x.\ \isasymlbrakk P(?a);\ \isasymnot \ (\isasymforall xa.\ P(?y3(x))\ \isasymlongrightarrow \ P(xa))\isasymrbrakk \ \isasymLongrightarrow \ P(x)
-\end{isabelle}
-In classical logic, a negated assumption is equivalent to a conclusion.  To
-get this effect, we create a swapped version of $(\forall I)$ and apply it
-using \ttindex{erule}.
-\begin{isabelle}
-\isacommand{apply}\ (erule\ allI\ [THEN\ [2]\ swap])\isanewline
-\ 1.\ \isasymAnd x\ xa.\ \isasymlbrakk P(?a);\ \isasymnot \ P(x)\isasymrbrakk \ \isasymLongrightarrow \ P(?y3(x))\ \isasymlongrightarrow \ P(xa)
-\end{isabelle}
-The operand of \isa{erule} above denotes the following theorem:
-\begin{isabelle}
-\ \ \ \ \isasymlbrakk \isasymnot \ (\isasymforall x.\ ?P1(x));\
-\isasymAnd x.\ \isasymnot \ ?P\ \isasymLongrightarrow \
-?P1(x)\isasymrbrakk \
-\isasymLongrightarrow \ ?P%
-\end{isabelle}
-The previous conclusion, \isa{P(x)}, has become a negated assumption.
-\begin{isabelle}
-\isacommand{apply}\ (rule\ impI)\isanewline
-\ 1.\ \isasymAnd x\ xa.\ \isasymlbrakk P(?a);\ \isasymnot \ P(x);\ P(?y3(x))\isasymrbrakk \ \isasymLongrightarrow \ P(xa)
-\end{isabelle}
-The subgoal has three assumptions.  We produce a contradiction between the
-\index{assumptions!contradictory} assumptions~\isa{\isasymnot P(x)}
-and~\isa{P(?y3(x))}.   The proof never instantiates the
-unknown~\isa{?a}.
-\begin{isabelle}
-\isacommand{apply}\ (erule\ notE)\isanewline
-\ 1.\ \isasymAnd x\ xa.\ \isasymlbrakk P(?a);\ P(?y3(x))\isasymrbrakk \ \isasymLongrightarrow \ P(x)
-\isanewline
-\isacommand{apply}\ assumption\isanewline
-No\ subgoals!\isanewline
-\isacommand{done}
-\end{isabelle}
-The civilised way to prove this theorem is using the
-\methdx{blast} method, which automatically uses the classical form
-of the rule~$(\exists I)$:
-\begin{isabelle}
-\isacommand{lemma}\ "EX\ y.\ ALL\ x.\ P(y)-->P(x)"\isanewline
-\ 1.\ \isasymexists y.\ \isasymforall x.\ P(y)\ \isasymlongrightarrow \ P(x)
-\isanewline
-\isacommand{by}\ blast\isanewline
-No\ subgoals!
-\end{isabelle}
-If this theorem seems counterintuitive, then perhaps you are an
-intuitionist.  In constructive logic, proving $\ex{y}\all{x}P(y)\imp P(x)$
-requires exhibiting a particular term~$t$ such that $\all{x}P(t)\imp P(x)$,
-which we cannot do without further knowledge about~$P$.
-
-
-\section{Derived rules and the classical tactics}
-Classical first-order logic can be extended with the propositional
-connective $if(P,Q,R)$, where 
-$$ if(P,Q,R) \equiv P\conj Q \disj \neg P \conj R. \eqno(if) $$
-Theorems about $if$ can be proved by treating this as an abbreviation,
-replacing $if(P,Q,R)$ by $P\conj Q \disj \neg P \conj R$ in subgoals.  But
-this duplicates~$P$, causing an exponential blowup and an unreadable
-formula.  Introducing further abbreviations makes the problem worse.
-
-Natural deduction demands rules that introduce and eliminate $if(P,Q,R)$
-directly, without reference to its definition.  The simple identity
-\[ if(P,Q,R) \,\bimp\, (P\imp Q)\conj (\neg P\imp R) \]
-suggests that the
-$if$-introduction rule should be
-\[ \infer[({if}\,I)]{if(P,Q,R)}{\infer*{Q}{[P]}  &  \infer*{R}{[\neg P]}} \]
-The $if$-elimination rule reflects the definition of $if(P,Q,R)$ and the
-elimination rules for~$\disj$ and~$\conj$.
-\[ \infer[({if}\,E)]{S}{if(P,Q,R) & \infer*{S}{[P,Q]}
-                                  & \infer*{S}{[\neg P,R]}} 
-\]
-Having made these plans, we get down to work with Isabelle.  The theory of
-classical logic, \texttt{FOL}, is extended with the constant
-$if::[o,o,o]\To o$.  The axiom \tdx{if_def} asserts the
-equation~$(if)$.
-\begin{isabelle}
-\isacommand{theory}\ If\ \isacommand{imports}\ FOL\isanewline
-\isacommand{begin}\isanewline
-\isacommand{constdefs}\isanewline
-\ \ if\ ::\ "[o,o,o]=>o"\isanewline
-\ \ \ "if(P,Q,R)\ ==\ P\&Q\ |\ \isachartilde P\&R"
-\end{isabelle}
-We create the file \texttt{If.thy} containing these declarations.  (This file
-is in directory \texttt{FOL/ex} in the Isabelle distribution.)  Typing
-\begin{isabelle}
-use_thy "If";  
-\end{isabelle}
-loads that theory and sets it to be the current context.
-
-
-\subsection{Deriving the introduction rule}
-
-The derivations of the introduction and elimination rules demonstrate the
-methods for rewriting with definitions.  Classical reasoning is required,
-so we use \isa{blast}.
-
-The introduction rule, given the premises $P\Imp Q$ and $\neg P\Imp R$,
-concludes $if(P,Q,R)$.  We propose this lemma and immediately simplify
-using \isa{if\_def} to expand the definition of \isa{if} in the
-subgoal.
-\begin{isabelle}
-\isacommand{lemma}\ ifI: "[|\ P\ ==>\ Q;\ \isachartilde P\ ==>\ R\
-|]\ ==>\ if(P,Q,R)"\isanewline
-\ 1.\ \isasymlbrakk P\ \isasymLongrightarrow \ Q;\ \isasymnot \ P\ \isasymLongrightarrow \ R\isasymrbrakk \ \isasymLongrightarrow \ if(P,\ Q,\ R)
-\isanewline
-\isacommand{apply}\ (simp\ add:\ if\_def)\isanewline
-\ 1.\ \isasymlbrakk P\ \isasymLongrightarrow \ Q;\ \isasymnot \ P\ \isasymLongrightarrow \ R\isasymrbrakk \ \isasymLongrightarrow \ P\ \isasymand \ Q\ \isasymor \ \isasymnot \ P\ \isasymand \
-R
-\end{isabelle}
-The rule's premises, although expressed using meta-level implication
-(\isa{\isasymLongrightarrow}), are passed as ordinary implications to
-\methdx{blast}.  
-\begin{isabelle}
-\isacommand{apply}\ blast\isanewline
-No\ subgoals!\isanewline
-\isacommand{done}
-\end{isabelle}
-
-
-\subsection{Deriving the elimination rule}
-The elimination rule has three premises, two of which are themselves rules.
-The conclusion is simply $S$.
-\begin{isabelle}
-\isacommand{lemma}\ ifE:\isanewline
-\ \ \ "[|\ if(P,Q,R);\ \ [|P;\ Q|]\ ==>\ S;\ [|\isachartilde P;\ R|]\ ==>\ S\ |]\ ==>\ S"\isanewline
-\ 1.\ \isasymlbrakk if(P,\ Q,\ R);\ \isasymlbrakk P;\ Q\isasymrbrakk \ \isasymLongrightarrow \ S;\ \isasymlbrakk \isasymnot \ P;\ R\isasymrbrakk \ \isasymLongrightarrow \ S\isasymrbrakk \ \isasymLongrightarrow \ S%
-\isanewline
-\isacommand{apply}\ (simp\ add:\ if\_def)\isanewline
-\ 1.\ \isasymlbrakk P\ \isasymand \ Q\ \isasymor \ \isasymnot \ P\ \isasymand \ R;\ \isasymlbrakk P;\ Q\isasymrbrakk \ \isasymLongrightarrow \ S;\ \isasymlbrakk \isasymnot \ P;\ R\isasymrbrakk \ \isasymLongrightarrow \ S\isasymrbrakk \ \isasymLongrightarrow \ S%
-\end{isabelle}
-The proof script is the same as before: \isa{simp} followed by
-\isa{blast}:
-\begin{isabelle}
-\isacommand{apply}\ blast\isanewline
-No\ subgoals!\isanewline
-\isacommand{done}
-\end{isabelle}
-
-
-\subsection{Using the derived rules}
-Our new derived rules, \tdx{ifI} and~\tdx{ifE}, permit natural
-proofs of theorems such as the following:
-\begin{eqnarray*}
-    if(P, if(Q,A,B), if(Q,C,D)) & \bimp & if(Q,if(P,A,C),if(P,B,D)) \\
-    if(if(P,Q,R), A, B)         & \bimp & if(P,if(Q,A,B),if(R,A,B))
-\end{eqnarray*}
-Proofs also require the classical reasoning rules and the $\bimp$
-introduction rule (called~\tdx{iffI}: do not confuse it with~\isa{ifI}). 
-
-To display the $if$-rules in action, let us analyse a proof step by step.
-\begin{isabelle}
-\isacommand{lemma}\ if\_commute:\isanewline
-\ \ \ \ \ "if(P,\ if(Q,A,B),\
-if(Q,C,D))\ <->\ if(Q,\ if(P,A,C),\ if(P,B,D))"\isanewline
-\isacommand{apply}\ (rule\ iffI)\isanewline
-\ 1.\ if(P,\ if(Q,\ A,\ B),\ if(Q,\ C,\ D))\ \isasymLongrightarrow \isanewline
-\isaindent{\ 1.\ }if(Q,\ if(P,\ A,\ C),\ if(P,\ B,\ D))\isanewline
-\ 2.\ if(Q,\ if(P,\ A,\ C),\ if(P,\ B,\ D))\ \isasymLongrightarrow \isanewline
-\isaindent{\ 2.\ }if(P,\ if(Q,\ A,\ B),\ if(Q,\ C,\ D))
-\end{isabelle}
-The $if$-elimination rule can be applied twice in succession.
-\begin{isabelle}
-\isacommand{apply}\ (erule\ ifE)\isanewline
-\ 1.\ \isasymlbrakk P;\ if(Q,\ A,\ B)\isasymrbrakk \ \isasymLongrightarrow \ if(Q,\ if(P,\ A,\ C),\ if(P,\ B,\ D))\isanewline
-\ 2.\ \isasymlbrakk \isasymnot \ P;\ if(Q,\ C,\ D)\isasymrbrakk \ \isasymLongrightarrow \ if(Q,\ if(P,\ A,\ C),\ if(P,\ B,\ D))\isanewline
-\ 3.\ if(Q,\ if(P,\ A,\ C),\ if(P,\ B,\ D))\ \isasymLongrightarrow \isanewline
-\isaindent{\ 3.\ }if(P,\ if(Q,\ A,\ B),\ if(Q,\ C,\ D))
-\isanewline
-\isacommand{apply}\ (erule\ ifE)\isanewline
-\ 1.\ \isasymlbrakk P;\ Q;\ A\isasymrbrakk \ \isasymLongrightarrow \ if(Q,\ if(P,\ A,\ C),\ if(P,\ B,\ D))\isanewline
-\ 2.\ \isasymlbrakk P;\ \isasymnot \ Q;\ B\isasymrbrakk \ \isasymLongrightarrow \ if(Q,\ if(P,\ A,\ C),\ if(P,\ B,\ D))\isanewline
-\ 3.\ \isasymlbrakk \isasymnot \ P;\ if(Q,\ C,\ D)\isasymrbrakk \ \isasymLongrightarrow \ if(Q,\ if(P,\ A,\ C),\ if(P,\ B,\ D))\isanewline
-\ 4.\ if(Q,\ if(P,\ A,\ C),\ if(P,\ B,\ D))\ \isasymLongrightarrow \isanewline
-\isaindent{\ 4.\ }if(P,\ if(Q,\ A,\ B),\ if(Q,\ C,\ D))
-\end{isabelle}
-%
-In the first two subgoals, all assumptions have been reduced to atoms.  Now
-$if$-introduction can be applied.  Observe how the $if$-rules break down
-occurrences of $if$ when they become the outermost connective.
-\begin{isabelle}
-\isacommand{apply}\ (rule\ ifI)\isanewline
-\ 1.\ \isasymlbrakk P;\ Q;\ A;\ Q\isasymrbrakk \ \isasymLongrightarrow \ if(P,\ A,\ C)\isanewline
-\ 2.\ \isasymlbrakk P;\ Q;\ A;\ \isasymnot \ Q\isasymrbrakk \ \isasymLongrightarrow \ if(P,\ B,\ D)\isanewline
-\ 3.\ \isasymlbrakk P;\ \isasymnot \ Q;\ B\isasymrbrakk \ \isasymLongrightarrow \ if(Q,\ if(P,\ A,\ C),\ if(P,\ B,\ D))\isanewline
-\ 4.\ \isasymlbrakk \isasymnot \ P;\ if(Q,\ C,\ D)\isasymrbrakk \ \isasymLongrightarrow \ if(Q,\ if(P,\ A,\ C),\ if(P,\ B,\ D))\isanewline
-\ 5.\ if(Q,\ if(P,\ A,\ C),\ if(P,\ B,\ D))\ \isasymLongrightarrow \isanewline
-\isaindent{\ 5.\ }if(P,\ if(Q,\ A,\ B),\ if(Q,\ C,\ D))
-\isanewline
-\isacommand{apply}\ (rule\ ifI)\isanewline
-\ 1.\ \isasymlbrakk P;\ Q;\ A;\ Q;\ P\isasymrbrakk \ \isasymLongrightarrow \ A\isanewline
-\ 2.\ \isasymlbrakk P;\ Q;\ A;\ Q;\ \isasymnot \ P\isasymrbrakk \ \isasymLongrightarrow \ C\isanewline
-\ 3.\ \isasymlbrakk P;\ Q;\ A;\ \isasymnot \ Q\isasymrbrakk \ \isasymLongrightarrow \ if(P,\ B,\ D)\isanewline
-\ 4.\ \isasymlbrakk P;\ \isasymnot \ Q;\ B\isasymrbrakk \ \isasymLongrightarrow \ if(Q,\ if(P,\ A,\ C),\ if(P,\ B,\ D))\isanewline
-\ 5.\ \isasymlbrakk \isasymnot \ P;\ if(Q,\ C,\ D)\isasymrbrakk \ \isasymLongrightarrow \ if(Q,\ if(P,\ A,\ C),\ if(P,\ B,\ D))\isanewline
-\ 6.\ if(Q,\ if(P,\ A,\ C),\ if(P,\ B,\ D))\ \isasymLongrightarrow \isanewline
-\isaindent{\ 6.\ }if(P,\ if(Q,\ A,\ B),\ if(Q,\ C,\ D))
-\end{isabelle}
-Where do we stand?  The first subgoal holds by assumption; the second and
-third, by contradiction.  This is getting tedious.  We could use the classical
-reasoner, but first let us extend the default claset with the derived rules
-for~$if$.
-\begin{isabelle}
-\isacommand{declare}\ ifI\ [intro!]\isanewline
-\isacommand{declare}\ ifE\ [elim!]
-\end{isabelle}
-With these declarations, we could have proved this theorem with a single
-call to \isa{blast}.  Here is another example:
-\begin{isabelle}
-\isacommand{lemma}\ "if(if(P,Q,R),\ A,\ B)\ <->\ if(P,\ if(Q,A,B),\ if(R,A,B))"\isanewline
-\ 1.\ if(if(P,\ Q,\ R),\ A,\ B)\ \isasymlongleftrightarrow \ if(P,\ if(Q,\ A,\ B),\ if(R,\ A,\ B))
-\isanewline
-\isacommand{by}\ blast
-\end{isabelle}
-
-
-\subsection{Derived rules versus definitions}
-Dispensing with the derived rules, we can treat $if$ as an
-abbreviation, and let the \methdx{blast} method prove the expanded formula.  Let
-us redo the previous proof:
-\begin{isabelle}
-\isacommand{lemma}\ "if(if(P,Q,R),\ A,\ B)\ <->\ if(P,\ if(Q,A,B),\ if(R,A,B))"\isanewline
-\ 1.\ if(if(P,\ Q,\ R),\ A,\ B)\ \isasymlongleftrightarrow \ if(P,\ if(Q,\ A,\ B),\ if(R,\ A,\ B))
-\end{isabelle}
-This time, we simply unfold using the definition of $if$:
-\begin{isabelle}
-\isacommand{apply}\ (simp\ add:\ if\_def)\isanewline
-\ 1.\ (P\ \isasymand \ Q\ \isasymor \ \isasymnot \ P\ \isasymand \ R)\ \isasymand \ A\ \isasymor \ (\isasymnot \ P\ \isasymor \ \isasymnot \ Q)\ \isasymand \ (P\ \isasymor \ \isasymnot \ R)\ \isasymand \ B\ \isasymlongleftrightarrow \isanewline
-\isaindent{\ 1.\ }P\ \isasymand \ (Q\ \isasymand \ A\ \isasymor \ \isasymnot \ Q\ \isasymand \ B)\ \isasymor \ \isasymnot \ P\ \isasymand \ (R\ \isasymand \ A\ \isasymor \ \isasymnot \ R\ \isasymand \ B)
-\end{isabelle}
-We are left with a subgoal in pure first-order logic, and it falls to
-\isa{blast}:
-\begin{isabelle}
-\isacommand{apply}\ blast\isanewline
-No\ subgoals!
-\end{isabelle}
-Expanding definitions reduces the extended logic to the base logic.  This
-approach has its merits, but it can be slow.  In these examples, proofs
-using the derived rules for~\isa{if} run about six
-times faster  than proofs using just the rules of first-order logic.
-
-Expanding definitions can also make it harder to diagnose errors. 
-Suppose we are having difficulties in proving some goal.  If by expanding
-definitions we have made it unreadable, then we have little hope of
-diagnosing the problem.
-
-Attempts at program verification often yield invalid assertions.
-Let us try to prove one:
-\begin{isabelle}
-\isacommand{lemma}\ "if(if(P,Q,R),\ A,\ B)\ <->\ if(P,\ if(Q,A,B),\ if(R,B,A))"\isanewline
-\ 1.\ if(if(P,\ Q,\ R),\ A,\ B)\ \isasymlongleftrightarrow \ if(P,\ if(Q,\ A,\ B),\ if(R,\ B,\
-A))
-\end{isabelle}
-Calling \isa{blast} yields an uninformative failure message. We can
-get a closer look at the situation by applying \methdx{auto}.
-\begin{isabelle}
-\isacommand{apply}\ auto\isanewline
-\ 1.\ \isasymlbrakk A;\ \isasymnot \ P;\ R\isasymrbrakk \ \isasymLongrightarrow \ B\isanewline
-\ 2.\ \isasymlbrakk B;\ \isasymnot \ P;\ \isasymnot \ R\isasymrbrakk \ \isasymLongrightarrow \ A\isanewline
-\ 3.\ \isasymlbrakk B;\ \isasymnot \ P;\ R\isasymrbrakk \ \isasymLongrightarrow \ A\isanewline
-\ 4.\ \isasymlbrakk \isasymnot \ R;\ A;\ \isasymnot \ B;\ \isasymnot \ P\isasymrbrakk \ \isasymLongrightarrow \
-False
-\end{isabelle}
-Subgoal~1 is unprovable and yields a countermodel: $P$ and~$B$ are false
-while~$R$ and~$A$ are true.  This truth assignment reduces the main goal to
-$true\bimp false$, which is of course invalid.
-
-We can repeat this analysis by expanding definitions, using just the rules of
-first-order logic:
-\begin{isabelle}
-\isacommand{lemma}\ "if(if(P,Q,R),\ A,\ B)\ <->\ if(P,\ if(Q,A,B),\ if(R,B,A))"\isanewline
-\ 1.\ if(if(P,\ Q,\ R),\ A,\ B)\ \isasymlongleftrightarrow \ if(P,\ if(Q,\ A,\ B),\ if(R,\ B,\
-A))
-\isanewline
-\isacommand{apply}\ (simp\ add:\ if\_def)\isanewline
-\ 1.\ (P\ \isasymand \ Q\ \isasymor \ \isasymnot \ P\ \isasymand \ R)\ \isasymand \ A\ \isasymor \ (\isasymnot \ P\ \isasymor \ \isasymnot \ Q)\ \isasymand \ (P\ \isasymor \ \isasymnot \ R)\ \isasymand \ B\ \isasymlongleftrightarrow \isanewline
-\isaindent{\ 1.\ }P\ \isasymand \ (Q\ \isasymand \ A\ \isasymor \ \isasymnot \ Q\ \isasymand \ B)\ \isasymor \ \isasymnot \ P\ \isasymand \ (R\ \isasymand \ B\ \isasymor \ \isasymnot \ R\ \isasymand \ A)
-\end{isabelle}
-Again \isa{blast} would fail, so we try \methdx{auto}:
-\begin{isabelle}
-\isacommand{apply}\ (auto)\ \isanewline
-\ 1.\ \isasymlbrakk A;\ \isasymnot \ P;\ R\isasymrbrakk \ \isasymLongrightarrow \ B\isanewline
-\ 2.\ \isasymlbrakk A;\ \isasymnot \ P;\ R;\ \isasymnot \ B\isasymrbrakk \ \isasymLongrightarrow \ Q\isanewline
-\ 3.\ \isasymlbrakk B;\ \isasymnot \ R;\ \isasymnot \ P;\ \isasymnot \ A\isasymrbrakk \ \isasymLongrightarrow \ False\isanewline
-\ 4.\ \isasymlbrakk B;\ \isasymnot \ P;\ \isasymnot \ A;\ \isasymnot \ R;\ Q\isasymrbrakk \ \isasymLongrightarrow \ False\isanewline
-\ 5.\ \isasymlbrakk B;\ \isasymnot \ Q;\ \isasymnot \ R;\ \isasymnot \ P;\ \isasymnot \ A\isasymrbrakk \ \isasymLongrightarrow \ False\isanewline
-\ 6.\ \isasymlbrakk B;\ \isasymnot \ A;\ \isasymnot \ P;\ R\isasymrbrakk \ \isasymLongrightarrow \ False\isanewline
-\ 7.\ \isasymlbrakk \isasymnot \ P;\ A;\ \isasymnot \ B;\ \isasymnot \ R\isasymrbrakk \ \isasymLongrightarrow \ False\isanewline
-\ 8.\ \isasymlbrakk \isasymnot \ P;\ A;\ \isasymnot \ B;\ \isasymnot \ R\isasymrbrakk \ \isasymLongrightarrow \ Q%
-\end{isabelle}
-Subgoal~1 yields the same countermodel as before.  But each proof step has
-taken six times as long, and the final result contains twice as many subgoals.
-
-Expanding your definitions usually makes proofs more difficult.  This is
-why the classical prover has been designed to accept derived rules.
-
-\index{first-order logic|)}
--- a/doc-src/ZF/ZF.tex	Mon Aug 27 21:37:34 2012 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,2717 +0,0 @@
-\chapter{Zermelo-Fraenkel Set Theory}
-\index{set theory|(}
-
-The theory~\thydx{ZF} implements Zermelo-Fraenkel set
-theory~\cite{halmos60,suppes72} as an extension of~\texttt{FOL}, classical
-first-order logic.  The theory includes a collection of derived natural
-deduction rules, for use with Isabelle's classical reasoner.  Some
-of it is based on the work of No\"el~\cite{noel}.
-
-A tremendous amount of set theory has been formally developed, including the
-basic properties of relations, functions, ordinals and cardinals.  Significant
-results have been proved, such as the Schr\"oder-Bernstein Theorem, the
-Wellordering Theorem and a version of Ramsey's Theorem.  \texttt{ZF} provides
-both the integers and the natural numbers.  General methods have been
-developed for solving recursion equations over monotonic functors; these have
-been applied to yield constructions of lists, trees, infinite lists, etc.
-
-\texttt{ZF} has a flexible package for handling inductive definitions,
-such as inference systems, and datatype definitions, such as lists and
-trees.  Moreover it handles coinductive definitions, such as
-bisimulation relations, and codatatype definitions, such as streams.  It
-provides a streamlined syntax for defining primitive recursive functions over
-datatypes. 
-
-Published articles~\cite{paulson-set-I,paulson-set-II} describe \texttt{ZF}
-less formally than this chapter.  Isabelle employs a novel treatment of
-non-well-founded data structures within the standard {\sc zf} axioms, including
-the Axiom of Foundation~\cite{paulson-mscs}.
-
-
-\section{Which version of axiomatic set theory?}
-The two main axiom systems for set theory are Bernays-G\"odel~({\sc bg})
-and Zermelo-Fraenkel~({\sc zf}).  Resolution theorem provers can use {\sc
-  bg} because it is finitely axiomatizable~\cite{boyer86,quaife92}.  {\sc zf} does not
-have a finite axiom system because of its Axiom Scheme of Replacement.
-This makes it awkward to use with many theorem provers, since instances
-of the axiom scheme have to be invoked explicitly.  Since Isabelle has no
-difficulty with axiom schemes, we may adopt either axiom system.
-
-These two theories differ in their treatment of {\bf classes}, which are
-collections that are `too big' to be sets.  The class of all sets,~$V$,
-cannot be a set without admitting Russell's Paradox.  In {\sc bg}, both
-classes and sets are individuals; $x\in V$ expresses that $x$ is a set.  In
-{\sc zf}, all variables denote sets; classes are identified with unary
-predicates.  The two systems define essentially the same sets and classes,
-with similar properties.  In particular, a class cannot belong to another
-class (let alone a set).
-
-Modern set theorists tend to prefer {\sc zf} because they are mainly concerned
-with sets, rather than classes.  {\sc bg} requires tiresome proofs that various
-collections are sets; for instance, showing $x\in\{x\}$ requires showing that
-$x$ is a set.
-
-
-\begin{figure} \small
-\begin{center}
-\begin{tabular}{rrr} 
-  \it name      &\it meta-type  & \it description \\ 
-  \cdx{Let}     & $[\alpha,\alpha\To\beta]\To\beta$ & let binder\\
-  \cdx{0}       & $i$           & empty set\\
-  \cdx{cons}    & $[i,i]\To i$  & finite set constructor\\
-  \cdx{Upair}   & $[i,i]\To i$  & unordered pairing\\
-  \cdx{Pair}    & $[i,i]\To i$  & ordered pairing\\
-  \cdx{Inf}     & $i$   & infinite set\\
-  \cdx{Pow}     & $i\To i$      & powerset\\
-  \cdx{Union} \cdx{Inter} & $i\To i$    & set union/intersection \\
-  \cdx{split}   & $[[i,i]\To i, i] \To i$ & generalized projection\\
-  \cdx{fst} \cdx{snd}   & $i\To i$      & projections\\
-  \cdx{converse}& $i\To i$      & converse of a relation\\
-  \cdx{succ}    & $i\To i$      & successor\\
-  \cdx{Collect} & $[i,i\To o]\To i$     & separation\\
-  \cdx{Replace} & $[i, [i,i]\To o] \To i$       & replacement\\
-  \cdx{PrimReplace} & $[i, [i,i]\To o] \To i$   & primitive replacement\\
-  \cdx{RepFun}  & $[i, i\To i] \To i$   & functional replacement\\
-  \cdx{Pi} \cdx{Sigma}  & $[i,i\To i]\To i$     & general product/sum\\
-  \cdx{domain}  & $i\To i$      & domain of a relation\\
-  \cdx{range}   & $i\To i$      & range of a relation\\
-  \cdx{field}   & $i\To i$      & field of a relation\\
-  \cdx{Lambda}  & $[i, i\To i]\To i$    & $\lambda$-abstraction\\
-  \cdx{restrict}& $[i, i] \To i$        & restriction of a function\\
-  \cdx{The}     & $[i\To o]\To i$       & definite description\\
-  \cdx{if}      & $[o,i,i]\To i$        & conditional\\
-  \cdx{Ball} \cdx{Bex}  & $[i, i\To o]\To o$    & bounded quantifiers
-\end{tabular}
-\end{center}
-\subcaption{Constants}
-
-\begin{center}
-\index{*"`"` symbol}
-\index{*"-"`"` symbol}
-\index{*"` symbol}\index{function applications}
-\index{*"- symbol}
-\index{*": symbol}
-\index{*"<"= symbol}
-\begin{tabular}{rrrr} 
-  \it symbol  & \it meta-type & \it priority & \it description \\ 
-  \tt ``        & $[i,i]\To i$  &  Left 90      & image \\
-  \tt -``       & $[i,i]\To i$  &  Left 90      & inverse image \\
-  \tt `         & $[i,i]\To i$  &  Left 90      & application \\
-  \sdx{Int}     & $[i,i]\To i$  &  Left 70      & intersection ($\int$) \\
-  \sdx{Un}      & $[i,i]\To i$  &  Left 65      & union ($\un$) \\
-  \tt -         & $[i,i]\To i$  &  Left 65      & set difference ($-$) \\[1ex]
-  \tt:          & $[i,i]\To o$  &  Left 50      & membership ($\in$) \\
-  \tt <=        & $[i,i]\To o$  &  Left 50      & subset ($\subseteq$) 
-\end{tabular}
-\end{center}
-\subcaption{Infixes}
-\caption{Constants of ZF} \label{zf-constants}
-\end{figure} 
-
-
-\section{The syntax of set theory}
-The language of set theory, as studied by logicians, has no constants.  The
-traditional axioms merely assert the existence of empty sets, unions,
-powersets, etc.; this would be intolerable for practical reasoning.  The
-Isabelle theory declares constants for primitive sets.  It also extends
-\texttt{FOL} with additional syntax for finite sets, ordered pairs,
-comprehension, general union/intersection, general sums/products, and
-bounded quantifiers.  In most other respects, Isabelle implements precisely
-Zermelo-Fraenkel set theory.
-
-Figure~\ref{zf-constants} lists the constants and infixes of~ZF, while
-Figure~\ref{zf-trans} presents the syntax translations.  Finally,
-Figure~\ref{zf-syntax} presents the full grammar for set theory, including the
-constructs of FOL.
-
-Local abbreviations can be introduced by a \isa{let} construct whose
-syntax appears in Fig.\ts\ref{zf-syntax}.  Internally it is translated into
-the constant~\cdx{Let}.  It can be expanded by rewriting with its
-definition, \tdx{Let_def}.
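-For example (a small sketch, not part of the original text):
-\begin{isabelle}
-\isacommand{lemma}\ "(let\ x\ =\ A\ in\ cons(a,\ x))\ =\ cons(a,\ A)"\isanewline
-\isacommand{by}\ (simp\ add:\ Let\_def)
-\end{isabelle}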
-
-Apart from \isa{let}, set theory does not use polymorphism.  All terms in
-ZF have type~\tydx{i}, which is the type of individuals and has
-class~\cldx{term}.  The type of first-order formulae, remember, 
-is~\tydx{o}.
-
-Infix operators include binary union and intersection ($A\un B$ and
-$A\int B$), set difference ($A-B$), and the subset and membership
-relations.  Note that $a$\verb|~:|$b$ is translated to $\lnot(a\in b)$,
-which is equivalent to  $a\notin b$.  The
-union and intersection operators ($\bigcup A$ and $\bigcap A$) form the
-union or intersection of a set of sets; $\bigcup A$ means the same as
-$\bigcup@{x\in A}x$.  Of these operators, only $\bigcup A$ is primitive.
-
-The constant \cdx{Upair} constructs unordered pairs; thus \isa{Upair($A$,$B$)} denotes the set~$\{A,B\}$ and
-\isa{Upair($A$,$A$)} denotes the singleton~$\{A\}$.  General union is
-used to define binary union.  The Isabelle version goes on to define
-the constant
-\cdx{cons}:
-\begin{eqnarray*}
-   A\cup B              & \equiv &       \bigcup(\isa{Upair}(A,B)) \\
-   \isa{cons}(a,B)      & \equiv &        \isa{Upair}(a,a) \un B
-\end{eqnarray*}
-The $\{a@1, \ldots\}$ notation abbreviates finite sets constructed in the
-obvious manner using~\isa{cons} and~$\emptyset$ (the empty set):
-\begin{eqnarray*}
- \{a,b,c\} & \equiv & \isa{cons}(a,\isa{cons}(b,\isa{cons}(c,\emptyset)))
-\end{eqnarray*}
-
-The constant \cdx{Pair} constructs ordered pairs, as in \isa{Pair($a$,$b$)}.  Ordered pairs may also be written within angle brackets,
-as {\tt<$a$,$b$>}.  The $n$-tuple {\tt<$a@1$,\ldots,$a@{n-1}$,$a@n$>}
-abbreviates the nest of pairs\par\nobreak
-\centerline{\isa{Pair($a@1$,\ldots,Pair($a@{n-1}$,$a@n$)\ldots).}}
-
-In ZF, a function is a set of pairs.  A ZF function~$f$ is simply an
-individual as far as Isabelle is concerned: its Isabelle type is~$i$, not say
-$i\To i$.  The infix operator~{\tt`} denotes the application of a function set
-to its argument; we must write~$f{\tt`}x$, not~$f(x)$.  The syntax for image
-is~$f{\tt``}A$ and that for inverse image is~$f{\tt-``}A$.
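-For example (a standard consequence of this encoding, stated here for
-orientation): if $f\in A\to B$ and $a\in A$ then $f{\tt`}a\in B$, and
-membership in an image satisfies
-$y\in f{\tt``}C \,\bimp\, (\exists x\in C.\ \pair{x,y}\in f)$.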
-
-
-\begin{figure} 
-\index{lambda abs@$\lambda$-abstractions}
-\index{*"-"> symbol}
-\index{*"* symbol}
-\begin{center} \footnotesize\tt\frenchspacing
-\begin{tabular}{rrr} 
-  \it external          & \it internal  & \it description \\ 
-  $a$ \ttilde: $b$      & \ttilde($a$ : $b$)    & \rm negated membership\\
-  \ttlbrace$a@1$, $\ldots$, $a@n$\ttrbrace  &  cons($a@1$,$\ldots$,cons($a@n$,0)) &
-        \rm finite set \\
-  <$a@1$, $\ldots$, $a@{n-1}$, $a@n$> & 
-        Pair($a@1$,\ldots,Pair($a@{n-1}$,$a@n$)\ldots) &
-        \rm ordered $n$-tuple \\
-  \ttlbrace$x$:$A . P[x]$\ttrbrace    &  Collect($A$,$\lambda x. P[x]$) &
-        \rm separation \\
-  \ttlbrace$y . x$:$A$, $Q[x,y]$\ttrbrace  &  Replace($A$,$\lambda x\,y. Q[x,y]$) &
-        \rm replacement \\
-  \ttlbrace$b[x] . x$:$A$\ttrbrace  &  RepFun($A$,$\lambda x. b[x]$) &
-        \rm functional replacement \\
-  \sdx{INT} $x$:$A . B[x]$      & Inter(\ttlbrace$B[x] . x$:$A$\ttrbrace) &
-        \rm general intersection \\
-  \sdx{UN}  $x$:$A . B[x]$      & Union(\ttlbrace$B[x] . x$:$A$\ttrbrace) &
-        \rm general union \\
-  \sdx{PROD} $x$:$A . B[x]$     & Pi($A$,$\lambda x. B[x]$) & 
-        \rm general product \\
-  \sdx{SUM}  $x$:$A . B[x]$     & Sigma($A$,$\lambda x. B[x]$) & 
-        \rm general sum \\
-  $A$ -> $B$            & Pi($A$,$\lambda x. B$) & 
-        \rm function space \\
-  $A$ * $B$             & Sigma($A$,$\lambda x. B$) & 
-        \rm binary product \\
-  \sdx{THE}  $x . P[x]$ & The($\lambda x. P[x]$) & 
-        \rm definite description \\
-  \sdx{lam}  $x$:$A . b[x]$     & Lambda($A$,$\lambda x. b[x]$) & 
-        \rm $\lambda$-abstraction\\[1ex]
-  \sdx{ALL} $x$:$A . P[x]$      & Ball($A$,$\lambda x. P[x]$) & 
-        \rm bounded $\forall$ \\
-  \sdx{EX}  $x$:$A . P[x]$      & Bex($A$,$\lambda x. P[x]$) & 
-        \rm bounded $\exists$
-\end{tabular}
-\end{center}
-\caption{Translations for ZF} \label{zf-trans}
-\end{figure} 
-
-
-\begin{figure} 
-\index{*let symbol}
-\index{*in symbol}
-\dquotes
-\[\begin{array}{rcl}
-    term & = & \hbox{expression of type~$i$} \\
-         & | & "let"~id~"="~term";"\dots";"~id~"="~term~"in"~term \\
-         & | & "if"~term~"then"~term~"else"~term \\
-         & | & "{\ttlbrace} " term\; ("," term)^* " {\ttrbrace}" \\
-         & | & "< "  term\; ("," term)^* " >"  \\
-         & | & "{\ttlbrace} " id ":" term " . " formula " {\ttrbrace}" \\
-         & | & "{\ttlbrace} " id " . " id ":" term ", " formula " {\ttrbrace}" \\
-         & | & "{\ttlbrace} " term " . " id ":" term " {\ttrbrace}" \\
-         & | & term " `` " term \\
-         & | & term " -`` " term \\
-         & | & term " ` " term \\
-         & | & term " * " term \\
-         & | & term " \isasyminter " term \\
-         & | & term " \isasymunion " term \\
-         & | & term " - " term \\
-         & | & term " -> " term \\
-         & | & "THE~~"  id  " . " formula\\
-         & | & "lam~~"  id ":" term " . " term \\
-         & | & "INT~~"  id ":" term " . " term \\
-         & | & "UN~~~"  id ":" term " . " term \\
-         & | & "PROD~"  id ":" term " . " term \\
-         & | & "SUM~~"  id ":" term " . " term \\[2ex]
- formula & = & \hbox{expression of type~$o$} \\
-         & | & term " : " term \\
-         & | & term " \ttilde: " term \\
-         & | & term " <= " term \\
-         & | & term " = " term \\
-         & | & term " \ttilde= " term \\
-         & | & "\ttilde\ " formula \\
-         & | & formula " \& " formula \\
-         & | & formula " | " formula \\
-         & | & formula " --> " formula \\
-         & | & formula " <-> " formula \\
-         & | & "ALL " id ":" term " . " formula \\
-         & | & "EX~~" id ":" term " . " formula \\
-         & | & "ALL~" id~id^* " . " formula \\
-         & | & "EX~~" id~id^* " . " formula \\
-         & | & "EX!~" id~id^* " . " formula
-  \end{array}
-\]
-\caption{Full grammar for ZF} \label{zf-syntax}
-\end{figure} 
-
-
-\section{Binding operators}
-The constant \cdx{Collect} constructs sets by the principle of {\bf
-  separation}.  The syntax for separation is
-\hbox{\tt\ttlbrace$x$:$A$.\ $P[x]$\ttrbrace}, where $P[x]$ is a formula
-that may contain free occurrences of~$x$.  It abbreviates the set \isa{Collect($A$,$\lambda x. P[x]$)}, which consists of all $x\in A$ that
-satisfy~$P[x]$.  Note that \isa{Collect} is an unfortunate choice of
-name: some set theories adopt a set-formation principle, related to
-replacement, called collection.
-
-The constant \cdx{Replace} constructs sets by the principle of {\bf
-  replacement}.  The syntax
-\hbox{\tt\ttlbrace$y$.\ $x$:$A$,$Q[x,y]$\ttrbrace} denotes the set 
-\isa{Replace($A$,$\lambda x\,y. Q[x,y]$)}, which consists of all~$y$ such
-that there exists $x\in A$ satisfying~$Q[x,y]$.  The Replacement Axiom
-has the condition that $Q$ must be single-valued over~$A$: for
-all~$x\in A$ there exists at most one $y$ satisfying~$Q[x,y]$.  A
-single-valued binary predicate is also called a {\bf class function}.
-
-The constant \cdx{RepFun} expresses a special case of replacement,
-where $Q[x,y]$ has the form $y=b[x]$.  Such a $Q$ is trivially
-single-valued, since it is just the graph of the meta-level
-function~$\lambda x. b[x]$.  The resulting set consists of all $b[x]$
-for~$x\in A$.  This is analogous to the \ML{} functional \isa{map},
-since it applies a function to every element of a set.  The syntax is
-\isa{\ttlbrace$b[x]$.\ $x$:$A$\ttrbrace}, which expands to 
-\isa{RepFun($A$,$\lambda x. b[x]$)}.
-
-\index{*INT symbol}\index{*UN symbol} 
-General unions and intersections of indexed
-families of sets, namely $\bigcup@{x\in A}B[x]$ and $\bigcap@{x\in A}B[x]$,
-are written \isa{UN $x$:$A$.\ $B[x]$} and \isa{INT $x$:$A$.\ $B[x]$}.
-Their meaning is expressed using \isa{RepFun} as
-\[
-\bigcup(\{B[x]. x\in A\}) \qquad\hbox{and}\qquad 
-\bigcap(\{B[x]. x\in A\}). 
-\]
-General sums $\sum@{x\in A}B[x]$ and products $\prod@{x\in A}B[x]$ can be
-constructed in set theory, where $B[x]$ is a family of sets over~$A$.  They
-have as special cases $A\times B$ and $A\to B$, where $B$ is simply a set.
-This is similar to the situation in Constructive Type Theory (set theory
-has `dependent sets') and calls for similar syntactic conventions.  The
-constants~\cdx{Sigma} and~\cdx{Pi} construct general sums and
-products.  Instead of \isa{Sigma($A$,$B$)} and \isa{Pi($A$,$B$)} we may
-write 
-\isa{SUM $x$:$A$.\ $B[x]$} and \isa{PROD $x$:$A$.\ $B[x]$}.  
-\index{*SUM symbol}\index{*PROD symbol}%
-As special cases, \hbox{\tt$A$*$B$} and \hbox{\tt$A$->$B$} abbreviate
-general sums and products over a constant family.\footnote{Unlike normal
-infix operators, {\tt*} and {\tt->} merely define abbreviations; there are
-no constants~\isa{op~*} and~\isa{op~->}.} Isabelle accepts these
-abbreviations in parsing and uses them whenever possible for printing.
-
-\index{*THE symbol} As mentioned above, whenever the axioms assert the
-existence and uniqueness of a set, Isabelle's set theory declares a constant
-for that set.  These constants can express the {\bf definite description}
-operator~$\iota x. P[x]$, which stands for the unique~$a$ satisfying~$P[a]$,
-if such exists.  Since all terms in ZF denote something, a description is
-always meaningful, but we do not know its value unless $P[x]$ defines it
-uniquely.  Using the constant~\cdx{The}, we may write descriptions as 
-\isa{The($\lambda x. P[x]$)} or use the syntax \isa{THE $x$.\ $P[x]$}.
-
-\index{*lam symbol}
-Function sets may be written in $\lambda$-notation; $\lambda x\in A. b[x]$
-stands for the set of all pairs $\pair{x,b[x]}$ for $x\in A$.  In order for
-this to be a set, the function's domain~$A$ must be given.  Using the
-constant~\cdx{Lambda}, we may express function sets as \isa{Lambda($A$,$\lambda x. b[x]$)} or use the syntax \isa{lam $x$:$A$.\ $b[x]$}.
-
-Isabelle's set theory defines two {\bf bounded quantifiers}:
-\begin{eqnarray*}
-   \forall x\in A. P[x] &\hbox{abbreviates}& \forall x. x\in A\imp P[x] \\
-   \exists x\in A. P[x] &\hbox{abbreviates}& \exists x. x\in A\conj P[x]
-\end{eqnarray*}
-The constants~\cdx{Ball} and~\cdx{Bex} are defined
-accordingly.  Instead of \isa{Ball($A$,$P$)} and \isa{Bex($A$,$P$)} we may
-write
-\isa{ALL $x$:$A$.\ $P[x]$} and \isa{EX $x$:$A$.\ $P[x]$}.
-
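-For example, these binders can be used directly in goals; since their
-rules are built into the automatic tools described later, a monotonicity
-fact such as the following should be provable in one step by the
-classical reasoner (method \isa{blast}):
-\begin{isabelle}
-\isacommand{lemma}\ "(ALL x:A. P(x)) ==> (ALL x:A \isasyminter B. P(x))"\isanewline
-\isacommand{by}\ blast
-\end{isabelle}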
-
-%%%% ZF.thy
-
-\begin{figure}
-\begin{alltt*}\isastyleminor
-\tdx{Let_def}:           Let(s, f) == f(s)
-
-\tdx{Ball_def}:          Ball(A,P) == {\isasymforall}x. x \isasymin A --> P(x)
-\tdx{Bex_def}:           Bex(A,P)  == {\isasymexists}x. x \isasymin A & P(x)
-
-\tdx{subset_def}:        A \isasymsubseteq B  == {\isasymforall}x \isasymin A. x \isasymin B
-\tdx{extension}:         A = B  <->  A \isasymsubseteq B & B \isasymsubseteq A
-
-\tdx{Union_iff}:         A \isasymin Union(C) <-> ({\isasymexists}B \isasymin C. A \isasymin B)
-\tdx{Pow_iff}:           A \isasymin Pow(B) <-> A \isasymsubseteq B
-\tdx{foundation}:        A=0 | ({\isasymexists}x \isasymin A. {\isasymforall}y \isasymin x. y \isasymnotin A)
-
-\tdx{replacement}:       ({\isasymforall}x \isasymin A. {\isasymforall}y z. P(x,y) & P(x,z) --> y=z) ==>
-                   b \isasymin PrimReplace(A,P) <-> ({\isasymexists}x{\isasymin}A. P(x,b))
-\subcaption{The Zermelo-Fraenkel Axioms}
-
-\tdx{Replace_def}: Replace(A,P) == 
-                   PrimReplace(A, \%x y. (\isasymexists!z. P(x,z)) & P(x,y))
-\tdx{RepFun_def}:  RepFun(A,f)  == {\ttlbrace}y . x \isasymin A, y=f(x)\ttrbrace
-\tdx{the_def}:     The(P)       == Union({\ttlbrace}y . x \isasymin {\ttlbrace}0{\ttrbrace}, P(y){\ttrbrace})
-\tdx{if_def}:      if(P,a,b)    == THE z. P & z=a | ~P & z=b
-\tdx{Collect_def}: Collect(A,P) == {\ttlbrace}y . x \isasymin A, x=y & P(x){\ttrbrace}
-\tdx{Upair_def}:   Upair(a,b)   == 
-               {\ttlbrace}y. x\isasymin{}Pow(Pow(0)), x=0 & y=a | x=Pow(0) & y=b{\ttrbrace}
-\subcaption{Consequences of replacement}
-
-\tdx{Inter_def}:   Inter(A) == {\ttlbrace}x \isasymin Union(A) . {\isasymforall}y \isasymin A. x \isasymin y{\ttrbrace}
-\tdx{Un_def}:      A \isasymunion B  == Union(Upair(A,B))
-\tdx{Int_def}:     A \isasyminter B  == Inter(Upair(A,B))
-\tdx{Diff_def}:    A - B    == {\ttlbrace}x \isasymin A . x \isasymnotin B{\ttrbrace}
-\subcaption{Union, intersection, difference}
-\end{alltt*}
-\caption{Rules and axioms of ZF} \label{zf-rules}
-\end{figure}
-
-
-\begin{figure}
-\begin{alltt*}\isastyleminor
-\tdx{cons_def}:    cons(a,A) == Upair(a,a) \isasymunion A
-\tdx{succ_def}:    succ(i) == cons(i,i)
-\tdx{infinity}:    0 \isasymin Inf & ({\isasymforall}y \isasymin Inf. succ(y) \isasymin Inf)
-\subcaption{Finite and infinite sets}
-
-\tdx{Pair_def}:      <a,b>      == {\ttlbrace}{\ttlbrace}a,a{\ttrbrace}, {\ttlbrace}a,b{\ttrbrace}{\ttrbrace}
-\tdx{split_def}:     split(c,p) == THE y. {\isasymexists}a b. p=<a,b> & y=c(a,b)
-\tdx{fst_def}:       fst(p)     == split(\%x y. x, p)
-\tdx{snd_def}:       snd(p)     == split(\%x y. y, p)
-\tdx{Sigma_def}:     Sigma(A,B) == {\isasymUnion}x \isasymin A. {\isasymUnion}y \isasymin B(x). {\ttlbrace}<x,y>{\ttrbrace}
-\subcaption{Ordered pairs and Cartesian products}
-
-\tdx{converse_def}: converse(r) == {\ttlbrace}z. w\isasymin{}r, {\isasymexists}x y. w=<x,y> & z=<y,x>{\ttrbrace}
-\tdx{domain_def}:   domain(r)   == {\ttlbrace}x. w \isasymin r, {\isasymexists}y. w=<x,y>{\ttrbrace}
-\tdx{range_def}:    range(r)    == domain(converse(r))
-\tdx{field_def}:    field(r)    == domain(r) \isasymunion range(r)
-\tdx{image_def}:    r `` A      == {\ttlbrace}y\isasymin{}range(r) . {\isasymexists}x \isasymin A. <x,y> \isasymin r{\ttrbrace}
-\tdx{vimage_def}:   r -`` A     == converse(r)``A
-\subcaption{Operations on relations}
-
-\tdx{lam_def}:   Lambda(A,b) == {\ttlbrace}<x,b(x)> . x \isasymin A{\ttrbrace}
-\tdx{apply_def}: f`a         == THE y. <a,y> \isasymin f
-\tdx{Pi_def}: Pi(A,B) == {\ttlbrace}f\isasymin{}Pow(Sigma(A,B)). {\isasymforall}x\isasymin{}A. \isasymexists!y. <x,y>\isasymin{}f{\ttrbrace}
-\tdx{restrict_def}:  restrict(f,A) == lam x \isasymin A. f`x
-\subcaption{Functions and general product}
-\end{alltt*}
-\caption{Further definitions of ZF} \label{zf-defs}
-\end{figure}
-
-
-
-\section{The Zermelo-Fraenkel axioms}
-The axioms appear in Fig.\ts \ref{zf-rules}.  They resemble those
-presented by Suppes~\cite{suppes72}.  Most of the theory consists of
-definitions.  In particular, bounded quantifiers and the subset relation
-appear in other axioms.  Object-level quantifiers and implications have
-been replaced by meta-level ones wherever possible, to simplify use of the
-axioms.  
-
-The traditional replacement axiom asserts
-\[ y \in \isa{PrimReplace}(A,P) \bimp (\exists x\in A. P(x,y)) \]
-subject to the condition that $P(x,y)$ is single-valued for all~$x\in A$.
-The Isabelle theory defines \cdx{Replace} to apply
-\cdx{PrimReplace} to the single-valued part of~$P$, namely
-\[ (\exists!z. P(x,z)) \conj P(x,y). \]
-Thus $y\in \isa{Replace}(A,P)$ if and only if there is some~$x$ such that
-$P(x,-)$ holds uniquely for~$y$.  Because the equivalence is unconditional,
-\isa{Replace} is much easier to use than \isa{PrimReplace}; it defines the
-same set, if $P(x,y)$ is single-valued.  The nice syntax for replacement
-expands to \isa{Replace}.
-
-Other consequences of replacement include replacement for 
-meta-level functions
-(\cdx{RepFun}) and definite descriptions (\cdx{The}).
-Axioms for separation (\cdx{Collect}) and unordered pairs
-(\cdx{Upair}) are traditionally assumed, but they actually follow
-from replacement~\cite[pages 237--8]{suppes72}.
-
-The definitions of general intersection, etc., are straightforward.  Note
-the definition of \isa{cons}, which underlies the finite set notation.
-The axiom of infinity gives us a set that contains~0 and is closed under
-successor (\cdx{succ}).  Although this set is not uniquely defined,
-the theory names it (\cdx{Inf}) in order to simplify the
-construction of the natural numbers.
-                                             
-Further definitions appear in Fig.\ts\ref{zf-defs}.  Ordered pairs are
-defined in the standard way, $\pair{a,b}\equiv\{\{a\},\{a,b\}\}$.  Recall
-that \cdx{Sigma}$(A,B)$ generalizes the Cartesian product of two
-sets.  It is defined to be the union of all singleton sets
-$\{\pair{x,y}\}$, for $x\in A$ and $y\in B(x)$.  This is a typical usage of
-general union.
-
-The projections \cdx{fst} and~\cdx{snd} are defined in terms of the
-generalized projection \cdx{split}.  The latter has been borrowed from
-Martin-L\"of's Type Theory, and is often easier to use than \cdx{fst}
-and~\cdx{snd}.
-
-Operations on relations include converse, domain, range, and image.  The
-set $\isa{Pi}(A,B)$ generalizes the space of functions between two sets.
-Note the simple definitions of $\lambda$-abstraction (using
-\cdx{RepFun}) and application (using a definite description).  The
-function \cdx{restrict}$(f,A)$ has the same values as~$f$, but only
-over the domain~$A$.
-
-
-%%%% zf.thy
-
-\begin{figure}
-\begin{alltt*}\isastyleminor
-\tdx{ballI}:     [| !!x. x\isasymin{}A ==> P(x) |] ==> {\isasymforall}x\isasymin{}A. P(x)
-\tdx{bspec}:     [| {\isasymforall}x\isasymin{}A. P(x);  x\isasymin{}A |] ==> P(x)
-\tdx{ballE}:     [| {\isasymforall}x\isasymin{}A. P(x);  P(x) ==> Q;  x \isasymnotin A ==> Q |] ==> Q
-
-\tdx{ball_cong}:  [| A=A';  !!x. x\isasymin{}A' ==> P(x) <-> P'(x) |] ==> 
-             ({\isasymforall}x\isasymin{}A. P(x)) <-> ({\isasymforall}x\isasymin{}A'. P'(x))
-
-\tdx{bexI}:      [| P(x);  x\isasymin{}A |] ==> {\isasymexists}x\isasymin{}A. P(x)
-\tdx{bexCI}:     [| {\isasymforall}x\isasymin{}A. ~P(x) ==> P(a);  a\isasymin{}A |] ==> {\isasymexists}x\isasymin{}A. P(x)
-\tdx{bexE}:      [| {\isasymexists}x\isasymin{}A. P(x);  !!x. [| x\isasymin{}A; P(x) |] ==> Q |] ==> Q
-
-\tdx{bex_cong}:  [| A=A';  !!x. x\isasymin{}A' ==> P(x) <-> P'(x) |] ==> 
-             ({\isasymexists}x\isasymin{}A. P(x)) <-> ({\isasymexists}x\isasymin{}A'. P'(x))
-\subcaption{Bounded quantifiers}
-
-\tdx{subsetI}:     (!!x. x \isasymin A ==> x \isasymin B) ==> A \isasymsubseteq B
-\tdx{subsetD}:     [| A \isasymsubseteq B;  c \isasymin A |] ==> c \isasymin B
-\tdx{subsetCE}:    [| A \isasymsubseteq B;  c \isasymnotin A ==> P;  c \isasymin B ==> P |] ==> P
-\tdx{subset_refl}:  A \isasymsubseteq A
-\tdx{subset_trans}: [| A \isasymsubseteq B;  B \isasymsubseteq C |] ==> A \isasymsubseteq C
-
-\tdx{equalityI}:   [| A \isasymsubseteq B;  B \isasymsubseteq A |] ==> A = B
-\tdx{equalityD1}:  A = B ==> A \isasymsubseteq B
-\tdx{equalityD2}:  A = B ==> B \isasymsubseteq A
-\tdx{equalityE}:   [| A = B;  [| A \isasymsubseteq B; B \isasymsubseteq A |] ==> P |]  ==>  P
-\subcaption{Subsets and extensionality}
-
-\tdx{emptyE}:        a \isasymin 0 ==> P
-\tdx{empty_subsetI}:  0 \isasymsubseteq A
-\tdx{equals0I}:      [| !!y. y \isasymin A ==> False |] ==> A=0
-\tdx{equals0D}:      [| A=0;  a \isasymin A |] ==> P
-
-\tdx{PowI}:          A \isasymsubseteq B ==> A \isasymin Pow(B)
-\tdx{PowD}:          A \isasymin Pow(B)  ==>  A \isasymsubseteq B
-\subcaption{The empty set; power sets}
-\end{alltt*}
-\caption{Basic derived rules for ZF} \label{zf-lemmas1}
-\end{figure}
-
-
-\section{From basic lemmas to function spaces}
-Faced with so many definitions, it is essential to prove lemmas.  Even
-trivial theorems like $A \int B = B \int A$ would be difficult to
-prove from the definitions alone.  Isabelle's set theory derives many
-rules using a natural deduction style.  Ideally, a natural deduction
-rule should introduce or eliminate just one operator, but this is not
-always practical.  For most operators, we may forget the definition
-and use the derived rules instead.
-
-\subsection{Fundamental lemmas}
-Figure~\ref{zf-lemmas1} presents the derived rules for the most basic
-operators.  The rules for the bounded quantifiers resemble those for the
-ordinary quantifiers, but note that \tdx{ballE} uses a negated assumption
-in the style of Isabelle's classical reasoner.  The \rmindex{congruence
-  rules} \tdx{ball_cong} and \tdx{bex_cong} are required by Isabelle's
-simplifier, but have few other uses.  Congruence rules must be specially
-derived for all binding operators, and henceforth will not be shown.
-
-Figure~\ref{zf-lemmas1} also shows rules for the subset and equality
-relations (proof by extensionality), and rules about the empty set and the
-power set operator.
-
-Figure~\ref{zf-lemmas2} presents rules for replacement and separation.
-The rules for \cdx{Replace} and \cdx{RepFun} are much simpler than
-comparable rules for \isa{PrimReplace} would be.  The principle of
-separation is proved explicitly, although most proofs should use the
-natural deduction rules for \isa{Collect}.  The elimination rule
-\tdx{CollectE} is equivalent to the two destruction rules
-\tdx{CollectD1} and \tdx{CollectD2}, but each rule is suited to
-particular circumstances.  Although too many rules can be confusing, there
-is no reason to aim for a minimal set of rules.  
-
-Figure~\ref{zf-lemmas3} presents rules for general union and intersection.
-The empty intersection should be undefined.  We cannot have
-$\bigcap(\emptyset)=V$ because $V$, the universal class, is not a set.  All
-expressions denote something in ZF set theory; the definition of
-intersection implies $\bigcap(\emptyset)=\emptyset$, but this value is
-arbitrary.  The rule \tdx{InterI} must have a premise to exclude
-the empty intersection.  Some of the laws governing intersections require
-similar premises.
-
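-For example, relating a general intersection to the corresponding union by
-these derived rules requires a witness that the family is non-empty.  A
-goal such as the following should be provable by \isa{blast}, supplying
-\isa{InterD} and \isa{UnionI} explicitly if necessary:
-\begin{isabelle}
-\isacommand{lemma}\ "b \isasymin A ==> Inter(A) \isasymsubseteq Union(A)"\isanewline
-\isacommand{by}\ (blast dest: InterD intro: UnionI)
-\end{isabelle}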
-
-%the [p] gives better page breaking for the book
-\begin{figure}[p]
-\begin{alltt*}\isastyleminor
-\tdx{ReplaceI}:   [| x\isasymin{}A;  P(x,b);  !!y. P(x,y) ==> y=b |] ==> 
-            b\isasymin{}{\ttlbrace}y. x\isasymin{}A, P(x,y){\ttrbrace}
-
-\tdx{ReplaceE}:   [| b\isasymin{}{\ttlbrace}y. x\isasymin{}A, P(x,y){\ttrbrace};  
-               !!x. [| x\isasymin{}A; P(x,b); {\isasymforall}y. P(x,y)-->y=b |] ==> R 
-            |] ==> R
-
-\tdx{RepFunI}:    [| a\isasymin{}A |] ==> f(a)\isasymin{}{\ttlbrace}f(x). x\isasymin{}A{\ttrbrace}
-\tdx{RepFunE}:    [| b\isasymin{}{\ttlbrace}f(x). x\isasymin{}A{\ttrbrace};  
-                !!x.[| x\isasymin{}A;  b=f(x) |] ==> P |] ==> P
-
-\tdx{separation}:  a\isasymin{}{\ttlbrace}x\isasymin{}A. P(x){\ttrbrace} <-> a\isasymin{}A & P(a)
-\tdx{CollectI}:    [| a\isasymin{}A;  P(a) |] ==> a\isasymin{}{\ttlbrace}x\isasymin{}A. P(x){\ttrbrace}
-\tdx{CollectE}:    [| a\isasymin{}{\ttlbrace}x\isasymin{}A. P(x){\ttrbrace};  [| a\isasymin{}A; P(a) |] ==> R |] ==> R
-\tdx{CollectD1}:   a\isasymin{}{\ttlbrace}x\isasymin{}A. P(x){\ttrbrace} ==> a\isasymin{}A
-\tdx{CollectD2}:   a\isasymin{}{\ttlbrace}x\isasymin{}A. P(x){\ttrbrace} ==> P(a)
-\end{alltt*}
-\caption{Replacement and separation} \label{zf-lemmas2}
-\end{figure}
-
-
-\begin{figure}
-\begin{alltt*}\isastyleminor
-\tdx{UnionI}: [| B\isasymin{}C;  A\isasymin{}B |] ==> A\isasymin{}Union(C)
-\tdx{UnionE}: [| A\isasymin{}Union(C);  !!B.[| A\isasymin{}B;  B\isasymin{}C |] ==> R |] ==> R
-
-\tdx{InterI}: [| !!x. x\isasymin{}C ==> A\isasymin{}x;  c\isasymin{}C |] ==> A\isasymin{}Inter(C)
-\tdx{InterD}: [| A\isasymin{}Inter(C);  B\isasymin{}C |] ==> A\isasymin{}B
-\tdx{InterE}: [| A\isasymin{}Inter(C);  A\isasymin{}B ==> R;  B \isasymnotin C ==> R |] ==> R
-
-\tdx{UN_I}:   [| a\isasymin{}A;  b\isasymin{}B(a) |] ==> b\isasymin{}({\isasymUnion}x\isasymin{}A. B(x))
-\tdx{UN_E}:   [| b\isasymin{}({\isasymUnion}x\isasymin{}A. B(x));  !!x.[| x\isasymin{}A;  b\isasymin{}B(x) |] ==> R 
-           |] ==> R
-
-\tdx{INT_I}:  [| !!x. x\isasymin{}A ==> b\isasymin{}B(x);  a\isasymin{}A |] ==> b\isasymin{}({\isasymInter}x\isasymin{}A. B(x))
-\tdx{INT_E}:  [| b\isasymin{}({\isasymInter}x\isasymin{}A. B(x));  a\isasymin{}A |] ==> b\isasymin{}B(a)
-\end{alltt*}
-\caption{General union and intersection} \label{zf-lemmas3}
-\end{figure}
-
-
-%%% upair.thy
-
-\begin{figure}
-\begin{alltt*}\isastyleminor
-\tdx{pairing}:   a\isasymin{}Upair(b,c) <-> (a=b | a=c)
-\tdx{UpairI1}:   a\isasymin{}Upair(a,b)
-\tdx{UpairI2}:   b\isasymin{}Upair(a,b)
-\tdx{UpairE}:    [| a\isasymin{}Upair(b,c);  a=b ==> P;  a=c ==> P |] ==> P
-\end{alltt*}
-\caption{Unordered pairs} \label{zf-upair1}
-\end{figure}
-
-
-\begin{figure}
-\begin{alltt*}\isastyleminor
-\tdx{UnI1}:      c\isasymin{}A ==> c\isasymin{}A \isasymunion B
-\tdx{UnI2}:      c\isasymin{}B ==> c\isasymin{}A \isasymunion B
-\tdx{UnCI}:      (c \isasymnotin B ==> c\isasymin{}A) ==> c\isasymin{}A \isasymunion B
-\tdx{UnE}:       [| c\isasymin{}A \isasymunion B;  c\isasymin{}A ==> P;  c\isasymin{}B ==> P |] ==> P
-
-\tdx{IntI}:      [| c\isasymin{}A;  c\isasymin{}B |] ==> c\isasymin{}A \isasyminter B
-\tdx{IntD1}:     c\isasymin{}A \isasyminter B ==> c\isasymin{}A
-\tdx{IntD2}:     c\isasymin{}A \isasyminter B ==> c\isasymin{}B
-\tdx{IntE}:      [| c\isasymin{}A \isasyminter B;  [| c\isasymin{}A; c\isasymin{}B |] ==> P |] ==> P
-
-\tdx{DiffI}:     [| c\isasymin{}A;  c \isasymnotin B |] ==> c\isasymin{}A - B
-\tdx{DiffD1}:    c\isasymin{}A - B ==> c\isasymin{}A
-\tdx{DiffD2}:    c\isasymin{}A - B ==> c  \isasymnotin  B
-\tdx{DiffE}:     [| c\isasymin{}A - B;  [| c\isasymin{}A; c \isasymnotin B |] ==> P |] ==> P
-\end{alltt*}
-\caption{Union, intersection, difference} \label{zf-Un}
-\end{figure}
-
-
-\begin{figure}
-\begin{alltt*}\isastyleminor
-\tdx{consI1}:    a\isasymin{}cons(a,B)
-\tdx{consI2}:    a\isasymin{}B ==> a\isasymin{}cons(b,B)
-\tdx{consCI}:    (a \isasymnotin B ==> a=b) ==> a\isasymin{}cons(b,B)
-\tdx{consE}:     [| a\isasymin{}cons(b,A);  a=b ==> P;  a\isasymin{}A ==> P |] ==> P
-
-\tdx{singletonI}:  a\isasymin{}{\ttlbrace}a{\ttrbrace}
-\tdx{singletonE}:  [| a\isasymin{}{\ttlbrace}b{\ttrbrace}; a=b ==> P |] ==> P
-\end{alltt*}
-\caption{Finite and singleton sets} \label{zf-upair2}
-\end{figure}
-
-
-\begin{figure}
-\begin{alltt*}\isastyleminor
-\tdx{succI1}:    i\isasymin{}succ(i)
-\tdx{succI2}:    i\isasymin{}j ==> i\isasymin{}succ(j)
-\tdx{succCI}:    (i \isasymnotin j ==> i=j) ==> i\isasymin{}succ(j)
-\tdx{succE}:     [| i\isasymin{}succ(j);  i=j ==> P;  i\isasymin{}j ==> P |] ==> P
-\tdx{succ_neq_0}:  [| succ(n)=0 |] ==> P
-\tdx{succ_inject}: succ(m) = succ(n) ==> m=n
-\end{alltt*}
-\caption{The successor function} \label{zf-succ}
-\end{figure}
-
-
-\begin{figure}
-\begin{alltt*}\isastyleminor
-\tdx{the_equality}: [| P(a); !!x. P(x) ==> x=a |] ==> (THE x. P(x))=a
-\tdx{theI}:         \isasymexists! x. P(x) ==> P(THE x. P(x))
-
-\tdx{if_P}:          P ==> (if P then a else b) = a
-\tdx{if_not_P}:     ~P ==> (if P then a else b) = b
-
-\tdx{mem_asym}:     [| a\isasymin{}b;  b\isasymin{}a |] ==> P
-\tdx{mem_irrefl}:   a\isasymin{}a ==> P
-\end{alltt*}
-\caption{Descriptions; non-circularity} \label{zf-the}
-\end{figure}
-
-
-\subsection{Unordered pairs and finite sets}
-Figure~\ref{zf-upair1} presents the principle of unordered pairing, along
-with its derived rules.  Binary union and intersection are defined in terms
-of unordered pairs (Fig.\ts\ref{zf-Un}).  Set difference is also included.  The
-rule \tdx{UnCI} is useful for classical reasoning about unions,
-like \isa{disjCI}\@; it supersedes \tdx{UnI1} and
-\tdx{UnI2}, but these rules are often easier to work with.  For
-intersection and difference we have both elimination and destruction rules.
-Again, there is no reason to provide a minimal rule set.
-
-Figure~\ref{zf-upair2} is concerned with finite sets: it presents rules
-for~\isa{cons}, the finite set constructor, and rules for singleton
-sets.  Figure~\ref{zf-succ} presents derived rules for the successor
-function, which is defined in terms of~\isa{cons}.  The proof that 
-\isa{succ} is injective appears to require the Axiom of Foundation.
-
-Definite descriptions (\sdx{THE}) are defined in terms of the singleton
-set~$\{0\}$, but their derived rules fortunately hide this
-(Fig.\ts\ref{zf-the}).  The rule~\tdx{theI} is difficult to apply
-because of the two occurrences of~$\Var{P}$.  However,
-\tdx{the_equality} does not have this problem and the files contain
-many examples of its use.
-
-Finally, the impossibility of having both $a\in b$ and $b\in a$
-(\tdx{mem_asym}) is proved by applying the Axiom of Foundation to
-the set $\{a,b\}$.  The impossibility of $a\in a$ is a trivial consequence.
-
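-For example, \isa{the_equality} makes short work of trivial descriptions.
-Applying it and finishing off the two resulting subgoals with \isa{auto}
-should prove the following:
-\begin{isabelle}
-\isacommand{lemma}\ "(THE x. x = a) = a"\isanewline
-\isacommand{by}\ (rule the_equality) auto
-\end{isabelle}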
-
-%%% subset.thy?
-
-\begin{figure}
-\begin{alltt*}\isastyleminor
-\tdx{Union_upper}:    B\isasymin{}A ==> B \isasymsubseteq Union(A)
-\tdx{Union_least}:    [| !!x. x\isasymin{}A ==> x \isasymsubseteq C |] ==> Union(A) \isasymsubseteq C
-
-\tdx{Inter_lower}:    B\isasymin{}A ==> Inter(A) \isasymsubseteq B
-\tdx{Inter_greatest}: [| a\isasymin{}A; !!x. x\isasymin{}A ==> C \isasymsubseteq x |] ==> C\isasymsubseteq{}Inter(A)
-
-\tdx{Un_upper1}:      A \isasymsubseteq A \isasymunion B
-\tdx{Un_upper2}:      B \isasymsubseteq A \isasymunion B
-\tdx{Un_least}:       [| A \isasymsubseteq C;  B \isasymsubseteq C |] ==> A \isasymunion B \isasymsubseteq C
-
-\tdx{Int_lower1}:     A \isasyminter B \isasymsubseteq A
-\tdx{Int_lower2}:     A \isasyminter B \isasymsubseteq B
-\tdx{Int_greatest}:   [| C \isasymsubseteq A;  C \isasymsubseteq B |] ==> C \isasymsubseteq A \isasyminter B
-
-\tdx{Diff_subset}:    A-B \isasymsubseteq A
-\tdx{Diff_contains}:  [| C \isasymsubseteq A;  C \isasyminter B = 0 |] ==> C \isasymsubseteq A-B
-
-\tdx{Collect_subset}: Collect(A,P) \isasymsubseteq A
-\end{alltt*}
-\caption{Subset and lattice properties} \label{zf-subset}
-\end{figure}
-
-
-\subsection{Subset and lattice properties}
-The subset relation is a complete lattice.  Unions form least upper bounds;
-non-empty intersections form greatest lower bounds.  Figure~\ref{zf-subset}
-shows the corresponding rules.  A few other laws involving subsets are
-included. 
-Reasoning directly about subsets often yields clearer proofs than
-reasoning about the membership relation.  Section~\ref{sec:ZF-pow-example}
-below presents an example of this, proving the equation 
-${\isa{Pow}(A)\cap \isa{Pow}(B)}= \isa{Pow}(A\cap B)$.
-
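-For instance, the lattice rules compose nicely: a proof of
-$A\int B \subseteq A\un B$ can simply chain \isa{Int_lower1} and
-\isa{Un_upper1} through \isa{subset_trans}, with no reasoning about
-membership at all:
-\begin{isabelle}
-\isacommand{lemma}\ "A \isasyminter B \isasymsubseteq A \isasymunion B"\isanewline
-\isacommand{by}\ (rule subset_trans [OF Int_lower1 Un_upper1])
-\end{isabelle}
-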
-%%% pair.thy
-
-\begin{figure}
-\begin{alltt*}\isastyleminor
-\tdx{Pair_inject1}: <a,b> = <c,d> ==> a=c
-\tdx{Pair_inject2}: <a,b> = <c,d> ==> b=d
-\tdx{Pair_inject}:  [| <a,b> = <c,d>;  [| a=c; b=d |] ==> P |] ==> P
-\tdx{Pair_neq_0}:   <a,b>=0 ==> P
-
-\tdx{fst_conv}:     fst(<a,b>) = a
-\tdx{snd_conv}:     snd(<a,b>) = b
-\tdx{split}:        split(\%x y. c(x,y), <a,b>) = c(a,b)
-
-\tdx{SigmaI}:     [| a\isasymin{}A;  b\isasymin{}B(a) |] ==> <a,b>\isasymin{}Sigma(A,B)
-
-\tdx{SigmaE}:     [| c\isasymin{}Sigma(A,B);  
-                !!x y.[| x\isasymin{}A; y\isasymin{}B(x); c=<x,y> |] ==> P |] ==> P
-
-\tdx{SigmaE2}:    [| <a,b>\isasymin{}Sigma(A,B);    
-                [| a\isasymin{}A;  b\isasymin{}B(a) |] ==> P   |] ==> P
-\end{alltt*}
-\caption{Ordered pairs; projections; general sums} \label{zf-pair}
-\end{figure}
-
-
-\subsection{Ordered pairs} \label{sec:pairs}
-
-Figure~\ref{zf-pair} presents the rules governing ordered pairs,
-projections and general sums --- in particular, that
-$\{\{a\},\{a,b\}\}$ functions as an ordered pair.  This property is
-expressed as two destruction rules,
-\tdx{Pair_inject1} and \tdx{Pair_inject2}, and equivalently
-as the elimination rule \tdx{Pair_inject}.
-
-The rule \tdx{Pair_neq_0} asserts $\pair{a,b}\neq\emptyset$.  This
-is a property of $\{\{a\},\{a,b\}\}$, and need not hold for other 
-encodings of ordered pairs.  The non-standard ordered pairs mentioned below
-satisfy $\pair{\emptyset;\emptyset}=\emptyset$.
-
-The natural deduction rules \tdx{SigmaI} and \tdx{SigmaE}
-assert that \cdx{Sigma}$(A,B)$ consists of all pairs of the form
-$\pair{x,y}$, for $x\in A$ and $y\in B(x)$.  The rule \tdx{SigmaE2}
-merely states that $\pair{a,b}\in \isa{Sigma}(A,B)$ implies $a\in A$ and
-$b\in B(a)$.
-
-In addition, it is possible to use tuples as patterns in abstractions:
-\begin{center}
-{\tt\%<$x$,$y$>. $t$} \quad stands for\quad \isa{split(\%$x$ $y$.\ $t$)}
-\end{center}
-Nested patterns are translated recursively:
-{\tt\%<$x$,$y$,$z$>. $t$} $\leadsto$ {\tt\%<$x$,<$y$,$z$>>. $t$} $\leadsto$
-\isa{split(\%$x$.\%<$y$,$z$>. $t$)} $\leadsto$ \isa{split(\%$x$. split(\%$y$
-  $z$.\ $t$))}.  The reverse translation is performed upon printing.
-\begin{warn}
-  The translation between patterns and \isa{split} is performed automatically
-  by the parser and printer.  Thus the internal and external form of a term
-  may differ, which affects proofs.  For example the term \isa{(\%<x,y>.<y,x>)<a,b>} requires the theorem \isa{split} to rewrite to
-  {\tt<b,a>}.
-\end{warn}
-In addition to explicit $\lambda$-abstractions, patterns can be used in any
-variable binding construct which is internally described by a
-$\lambda$-abstraction.  Here are some important examples:
-\begin{description}
-\item[Let:] \isa{let {\it pattern} = $t$ in $u$}
-\item[Choice:] \isa{THE~{\it pattern}~.~$P$}
-\item[Set operations:] \isa{\isasymUnion~{\it pattern}:$A$.~$B$}
-\item[Comprehension:] \isa{{\ttlbrace}~{\it pattern}:$A$~.~$P$~{\ttrbrace}}
-\end{description}
-
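-The term in the warning above can be checked directly: its internal form
-is an application of \cdx{split}, which the simplifier should reduce by
-the conversion theorem \isa{split}:
-\begin{isabelle}
-\isacommand{lemma}\ "split(\%x y. <y,x>, <a,b>) = <b,a>"\isanewline
-\isacommand{by}\ simp
-\end{isabelle}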
-
-%%% domrange.thy?
-
-\begin{figure}
-\begin{alltt*}\isastyleminor
-\tdx{domainI}:     <a,b>\isasymin{}r ==> a\isasymin{}domain(r)
-\tdx{domainE}:     [| a\isasymin{}domain(r); !!y. <a,y>\isasymin{}r ==> P |] ==> P
-\tdx{domain_subset}: domain(Sigma(A,B)) \isasymsubseteq A
-
-\tdx{rangeI}:      <a,b>\isasymin{}r ==> b\isasymin{}range(r)
-\tdx{rangeE}:      [| b\isasymin{}range(r); !!x. <x,b>\isasymin{}r ==> P |] ==> P
-\tdx{range_subset}: range(A*B) \isasymsubseteq B
-
-\tdx{fieldI1}:     <a,b>\isasymin{}r ==> a\isasymin{}field(r)
-\tdx{fieldI2}:     <a,b>\isasymin{}r ==> b\isasymin{}field(r)
-\tdx{fieldCI}:     (<c,a> \isasymnotin r ==> <a,b>\isasymin{}r) ==> a\isasymin{}field(r)
-
-\tdx{fieldE}:      [| a\isasymin{}field(r); 
-                !!x. <a,x>\isasymin{}r ==> P; 
-                !!x. <x,a>\isasymin{}r ==> P      
-             |] ==> P
-
-\tdx{field_subset}:  field(A*A) \isasymsubseteq A
-\end{alltt*}
-\caption{Domain, range and field of a relation} \label{zf-domrange}
-\end{figure}
-
-\begin{figure}
-\begin{alltt*}\isastyleminor
-\tdx{imageI}:      [| <a,b>\isasymin{}r; a\isasymin{}A |] ==> b\isasymin{}r``A
-\tdx{imageE}:      [| b\isasymin{}r``A; !!x.[| <x,b>\isasymin{}r; x\isasymin{}A |] ==> P |] ==> P
-
-\tdx{vimageI}:     [| <a,b>\isasymin{}r; b\isasymin{}B |] ==> a\isasymin{}r-``B
-\tdx{vimageE}:     [| a\isasymin{}r-``B; !!x.[| <a,x>\isasymin{}r;  x\isasymin{}B |] ==> P |] ==> P
-\end{alltt*}
-\caption{Image and inverse image} \label{zf-domrange2}
-\end{figure}
-
-
-\subsection{Relations}
-Figure~\ref{zf-domrange} presents rules involving relations, which are sets
-of ordered pairs.  The converse of a relation~$r$ is the set of all pairs
-$\pair{y,x}$ such that $\pair{x,y}\in r$; if $r$ is a function, then
-{\cdx{converse}$(r)$} is its inverse.  The rules for the domain
-operation, namely \tdx{domainI} and~\tdx{domainE}, assert that
-\cdx{domain}$(r)$ consists of all~$x$ such that $r$ contains
-some pair of the form~$\pair{x,y}$.  The range operation is similar, and
-the field of a relation is merely the union of its domain and range.  
-
-Figure~\ref{zf-domrange2} presents rules for images and inverse images.
-Note that these operations are generalisations of range and domain,
-respectively. 
-
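-For example, images interact with the typing of a relation in the expected
-way.  A fact such as the following ought to be within \isa{blast}'s reach,
-given the relevant rules:
-\begin{isabelle}
-\isacommand{lemma}\ "r \isasymsubseteq A*B ==> r `` C \isasymsubseteq B"\isanewline
-\isacommand{by}\ (blast elim: imageE SigmaE2 dest: subsetD)
-\end{isabelle}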
-
-%%% func.thy
-
-\begin{figure}
-\begin{alltt*}\isastyleminor
-\tdx{fun_is_rel}:     f\isasymin{}Pi(A,B) ==> f \isasymsubseteq Sigma(A,B)
-
-\tdx{apply_equality}:  [| <a,b>\isasymin{}f; f\isasymin{}Pi(A,B) |] ==> f`a = b
-\tdx{apply_equality2}: [| <a,b>\isasymin{}f; <a,c>\isasymin{}f; f\isasymin{}Pi(A,B) |] ==> b=c
-
-\tdx{apply_type}:     [| f\isasymin{}Pi(A,B); a\isasymin{}A |] ==> f`a\isasymin{}B(a)
-\tdx{apply_Pair}:     [| f\isasymin{}Pi(A,B); a\isasymin{}A |] ==> <a,f`a>\isasymin{}f
-\tdx{apply_iff}:      f\isasymin{}Pi(A,B) ==> <a,b>\isasymin{}f <-> a\isasymin{}A & f`a = b
-
-\tdx{fun_extension}:  [| f\isasymin{}Pi(A,B); g\isasymin{}Pi(A,D);
-                   !!x. x\isasymin{}A ==> f`x = g`x     |] ==> f=g
-
-\tdx{domain_type}:    [| <a,b>\isasymin{}f; f\isasymin{}Pi(A,B) |] ==> a\isasymin{}A
-\tdx{range_type}:     [| <a,b>\isasymin{}f; f\isasymin{}Pi(A,B) |] ==> b\isasymin{}B(a)
-
-\tdx{Pi_type}:        [| f\isasymin{}A->C; !!x. x\isasymin{}A ==> f`x\isasymin{}B(x) |] ==> f\isasymin{}Pi(A,B)
-\tdx{domain_of_fun}:  f\isasymin{}Pi(A,B) ==> domain(f)=A
-\tdx{range_of_fun}:   f\isasymin{}Pi(A,B) ==> f\isasymin{}A->range(f)
-
-\tdx{restrict}:       a\isasymin{}A ==> restrict(f,A) ` a = f`a
-\tdx{restrict_type}:  [| !!x. x\isasymin{}A ==> f`x\isasymin{}B(x) |] ==> 
-                restrict(f,A)\isasymin{}Pi(A,B)
-\end{alltt*}
-\caption{Functions} \label{zf-func1}
-\end{figure}
-
-
-\begin{figure}
-\begin{alltt*}\isastyleminor
-\tdx{lamI}:     a\isasymin{}A ==> <a,b(a)>\isasymin{}(lam x\isasymin{}A. b(x))
-\tdx{lamE}:     [| p\isasymin{}(lam x\isasymin{}A. b(x)); !!x.[| x\isasymin{}A; p=<x,b(x)> |] ==> P 
-          |] ==>  P
-
-\tdx{lam_type}: [| !!x. x\isasymin{}A ==> b(x)\isasymin{}B(x) |] ==> (lam x\isasymin{}A. b(x))\isasymin{}Pi(A,B)
-
-\tdx{beta}:     a\isasymin{}A ==> (lam x\isasymin{}A. b(x)) ` a = b(a)
-\tdx{eta}:      f\isasymin{}Pi(A,B) ==> (lam x\isasymin{}A. f`x) = f
-\end{alltt*}
-\caption{$\lambda$-abstraction} \label{zf-lam}
-\end{figure}
-
-
-\begin{figure}
-\begin{alltt*}\isastyleminor
-\tdx{fun_empty}:           0\isasymin{}0->0
-\tdx{fun_single}:          {\ttlbrace}<a,b>{\ttrbrace}\isasymin{}{\ttlbrace}a{\ttrbrace} -> {\ttlbrace}b{\ttrbrace}
-
-\tdx{fun_disjoint_Un}:     [| f\isasymin{}A->B; g\isasymin{}C->D; A \isasyminter C = 0  |] ==>  
-                     (f \isasymunion g)\isasymin{}(A \isasymunion C) -> (B \isasymunion D)
-
-\tdx{fun_disjoint_apply1}: [| a\isasymin{}A; f\isasymin{}A->B; g\isasymin{}C->D;  A\isasyminter{}C = 0 |] ==>  
-                     (f \isasymunion g)`a = f`a
-
-\tdx{fun_disjoint_apply2}: [| c\isasymin{}C; f\isasymin{}A->B; g\isasymin{}C->D;  A\isasyminter{}C = 0 |] ==>  
-                     (f \isasymunion g)`c = g`c
-\end{alltt*}
-\caption{Constructing functions from smaller sets} \label{zf-func2}
-\end{figure}
-
-
-\subsection{Functions}
-Functions, represented by graphs, are notoriously difficult to reason
-about.  The ZF theory provides many derived rules, which overlap more
-than they ought.  This section presents the more important rules.
-
-Figure~\ref{zf-func1} presents the basic properties of \cdx{Pi}$(A,B)$,
-the generalized function space.  For example, if $f$ is a function and
-$\pair{a,b}\in f$, then $f`a=b$ (\tdx{apply_equality}).  Two functions
-are equal provided they have equal domains and deliver equal results
-(\tdx{fun_extension}).
-
-By \tdx{Pi_type}, a function typing of the form $f\in A\to C$ can be
-refined to the dependent typing $f\in\prod@{x\in A}B(x)$, given a suitable
-family of sets $\{B(x)\}@{x\in A}$.  Conversely, by \tdx{range_of_fun},
-any dependent typing can be flattened to yield a function type of the form
-$A\to C$; here, $C=\isa{range}(f)$.
-
-Among the laws for $\lambda$-abstraction, \tdx{lamI} and \tdx{lamE}
-describe the graph of the generated function, while \tdx{beta} and
-\tdx{eta} are the standard conversions.  We essentially have a
-dependently-typed $\lambda$-calculus (Fig.\ts\ref{zf-lam}).
-
-Figure~\ref{zf-func2} presents some rules that can be used to construct
-functions explicitly.  We start with functions consisting of at most one
-pair, and may form the union of two functions provided their domains are
-disjoint.  
-
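-For example, \isa{lam_type} reduces the typing of an explicit
-$\lambda$-abstraction to the typing of its body, which the classical
-reasoner can usually finish off.  A script along these lines should work:
-\begin{isabelle}
-\isacommand{lemma}\ "(lam x \isasymin A. <x,x>) \isasymin A -> A*A"\isanewline
-\isacommand{apply}\ (rule lam_type)\isanewline
-\isacommand{apply}\ (blast intro: SigmaI)\isanewline
-\isacommand{done}
-\end{isabelle}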
-
-\begin{figure}
-\begin{alltt*}\isastyleminor
-\tdx{Int_absorb}:        A \isasyminter A = A
-\tdx{Int_commute}:       A \isasyminter B = B \isasyminter A
-\tdx{Int_assoc}:         (A \isasyminter B) \isasyminter C  =  A \isasyminter (B \isasyminter C)
-\tdx{Int_Un_distrib}:    (A \isasymunion B) \isasyminter C  =  (A \isasyminter C) \isasymunion (B \isasyminter C)
-
-\tdx{Un_absorb}:         A \isasymunion A = A
-\tdx{Un_commute}:        A \isasymunion B = B \isasymunion A
-\tdx{Un_assoc}:          (A \isasymunion B) \isasymunion C  =  A \isasymunion (B \isasymunion C)
-\tdx{Un_Int_distrib}:    (A \isasyminter B) \isasymunion C  =  (A \isasymunion C) \isasyminter (B \isasymunion C)
-
-\tdx{Diff_cancel}:       A-A = 0
-\tdx{Diff_disjoint}:     A \isasyminter (B-A) = 0
-\tdx{Diff_partition}:    A \isasymsubseteq B ==> A \isasymunion (B-A) = B
-\tdx{double_complement}: [| A \isasymsubseteq B; B \isasymsubseteq C |] ==> (B - (C-A)) = A
-\tdx{Diff_Un}:           A - (B \isasymunion C) = (A-B) \isasyminter (A-C)
-\tdx{Diff_Int}:          A - (B \isasyminter C) = (A-B) \isasymunion (A-C)
-
-\tdx{Union_Un_distrib}:  Union(A \isasymunion B) = Union(A) \isasymunion Union(B)
-\tdx{Inter_Un_distrib}:  [| a \isasymin A;  b \isasymin B |] ==> 
-                   Inter(A \isasymunion B) = Inter(A) \isasyminter Inter(B)
-
-\tdx{Int_Union_RepFun}:  A \isasyminter Union(B) = ({\isasymUnion}C \isasymin B. A \isasyminter C)
-
-\tdx{Un_Inter_RepFun}:   b \isasymin B ==> 
-                   A \isasymunion Inter(B) = ({\isasymInter}C \isasymin B. A \isasymunion C)
-
-\tdx{SUM_Un_distrib1}:   (SUM x \isasymin A \isasymunion B. C(x)) = 
-                   (SUM x \isasymin A. C(x)) \isasymunion (SUM x \isasymin B. C(x))
-
-\tdx{SUM_Un_distrib2}:   (SUM x \isasymin C. A(x) \isasymunion B(x)) =
-                   (SUM x \isasymin C. A(x)) \isasymunion (SUM x \isasymin C. B(x))
-
-\tdx{SUM_Int_distrib1}:  (SUM x \isasymin A \isasyminter B. C(x)) =
-                   (SUM x \isasymin A. C(x)) \isasyminter (SUM x \isasymin B. C(x))
-
-\tdx{SUM_Int_distrib2}:  (SUM x \isasymin C. A(x) \isasyminter B(x)) =
-                   (SUM x \isasymin C. A(x)) \isasyminter (SUM x \isasymin C. B(x))
-\end{alltt*}
-\caption{Equalities} \label{zf-equalities}
-\end{figure}
-
-
-\begin{figure}
-%\begin{constants} 
-%  \cdx{1}       & $i$           &       & $\{\emptyset\}$       \\
-%  \cdx{bool}    & $i$           &       & the set $\{\emptyset,1\}$     \\
-%  \cdx{cond}   & $[i,i,i]\To i$ &       & conditional for \isa{bool}    \\
-%  \cdx{not}    & $i\To i$       &       & negation for \isa{bool}       \\
-%  \sdx{and}    & $[i,i]\To i$   & Left 70 & conjunction for \isa{bool}  \\
-%  \sdx{or}     & $[i,i]\To i$   & Left 65 & disjunction for \isa{bool}  \\
-%  \sdx{xor}    & $[i,i]\To i$   & Left 65 & exclusive-or for \isa{bool}
-%\end{constants}
-%
-\begin{alltt*}\isastyleminor
-\tdx{bool_def}:      bool == {\ttlbrace}0,1{\ttrbrace}
-\tdx{cond_def}:      cond(b,c,d) == if b=1 then c else d
-\tdx{not_def}:       not(b)  == cond(b,0,1)
-\tdx{and_def}:       a and b == cond(a,b,0)
-\tdx{or_def}:        a or b  == cond(a,1,b)
-\tdx{xor_def}:       a xor b == cond(a,not(b),b)
-
-\tdx{bool_1I}:       1 \isasymin bool
-\tdx{bool_0I}:       0 \isasymin bool
-\tdx{boolE}:         [| c \isasymin bool;  c=1 ==> P;  c=0 ==> P |] ==> P
-\tdx{cond_1}:        cond(1,c,d) = c
-\tdx{cond_0}:        cond(0,c,d) = d
-\end{alltt*}
-\caption{The booleans} \label{zf-bool}
-\end{figure}
-
-
-\section{Further developments}
-The next group of developments is complex and extensive, and only
-highlights can be covered here.  It involves many theories and proofs. 
-
-Figure~\ref{zf-equalities} presents commutative, associative, distributive,
-and idempotency laws of union and intersection, along with other equations.
-
-Theory \thydx{Bool} defines $\{0,1\}$ as a set of booleans, with the usual
-operators including a conditional (Fig.\ts\ref{zf-bool}).  Although ZF is a
-first-order theory, you can obtain the effect of higher-order logic using
-\isa{bool}-valued functions, for example.  The constant~\isa{1} is
-translated to \isa{succ(0)}.
-
-\begin{figure}
-\index{*"+ symbol}
-\begin{constants}
-  \it symbol    & \it meta-type & \it priority & \it description \\ 
-  \tt +         & $[i,i]\To i$  &  Right 65     & disjoint union operator\\
-  \cdx{Inl}~~\cdx{Inr}  & $i\To i$      &       & injections\\
-  \cdx{case}    & $[i\To i,i\To i, i]\To i$ &   & conditional for $A+B$
-\end{constants}
-\begin{alltt*}\isastyleminor
-\tdx{sum_def}:   A+B == {\ttlbrace}0{\ttrbrace}*A \isasymunion {\ttlbrace}1{\ttrbrace}*B
-\tdx{Inl_def}:   Inl(a) == <0,a>
-\tdx{Inr_def}:   Inr(b) == <1,b>
-\tdx{case_def}:  case(c,d,u) == split(\%y z. cond(y, d(z), c(z)), u)
-
-\tdx{InlI}:      a \isasymin A ==> Inl(a) \isasymin A+B
-\tdx{InrI}:      b \isasymin B ==> Inr(b) \isasymin A+B
-
-\tdx{Inl_inject}:  Inl(a)=Inl(b) ==> a=b
-\tdx{Inr_inject}:  Inr(a)=Inr(b) ==> a=b
-\tdx{Inl_neq_Inr}: Inl(a)=Inr(b) ==> P
-
-\tdx{sum_iff}:  u \isasymin A+B <-> ({\isasymexists}x\isasymin{}A. u=Inl(x)) | ({\isasymexists}y\isasymin{}B. u=Inr(y))
-
-\tdx{case_Inl}:  case(c,d,Inl(a)) = c(a)
-\tdx{case_Inr}:  case(c,d,Inr(b)) = d(b)
-\end{alltt*}
-\caption{Disjoint unions} \label{zf-sum}
-\end{figure}
-
-
-\subsection{Disjoint unions}
-
-Theory \thydx{Sum} defines the disjoint union of two sets, with
-injections and a case analysis operator (Fig.\ts\ref{zf-sum}).  Disjoint
-unions play a role in datatype definitions, particularly when there is
-mutual recursion~\cite{paulson-set-II}.
-
-\begin{figure}
-\begin{alltt*}\isastyleminor
-\tdx{QPair_def}:      <a;b> == a+b
-\tdx{qsplit_def}:     qsplit(c,p)  == THE y. {\isasymexists}a b. p=<a;b> & y=c(a,b)
-\tdx{qfsplit_def}:    qfsplit(R,z) == {\isasymexists}x y. z=<x;y> & R(x,y)
-\tdx{qconverse_def}:  qconverse(r) == {\ttlbrace}z. w \isasymin r, {\isasymexists}x y. w=<x;y> & z=<y;x>{\ttrbrace}
-\tdx{QSigma_def}:     QSigma(A,B)  == {\isasymUnion}x \isasymin A. {\isasymUnion}y \isasymin B(x). {\ttlbrace}<x;y>{\ttrbrace}
-
-\tdx{qsum_def}:       A <+> B      == ({\ttlbrace}0{\ttrbrace} <*> A) \isasymunion ({\ttlbrace}1{\ttrbrace} <*> B)
-\tdx{QInl_def}:       QInl(a)      == <0;a>
-\tdx{QInr_def}:       QInr(b)      == <1;b>
-\tdx{qcase_def}:      qcase(c,d)   == qsplit(\%y z. cond(y, d(z), c(z)))
-\end{alltt*}
-\caption{Non-standard pairs, products and sums} \label{zf-qpair}
-\end{figure}
-
-
-\subsection{Non-standard ordered pairs}
-
-Theory \thydx{QPair} defines a notion of ordered pair that admits
-non-well-founded tupling (Fig.\ts\ref{zf-qpair}).  Such pairs are written
-{\tt<$a$;$b$>}.  It also defines the eliminator \cdx{qsplit}, the
-converse operator \cdx{qconverse}, and the summation operator
-\cdx{QSigma}.  These are completely analogous to the corresponding
-versions for standard ordered pairs.  The theory goes on to define a
-non-standard notion of disjoint sum using non-standard pairs.  All of these
-concepts satisfy the same properties as their standard counterparts; in
-addition, {\tt<$a$;$b$>} is continuous.  The theory supports coinductive
-definitions, for example of infinite lists~\cite{paulson-mscs}.
-
-\begin{figure}
-\begin{alltt*}\isastyleminor
-\tdx{bnd_mono_def}:  bnd_mono(D,h) == 
-               h(D)\isasymsubseteq{}D & ({\isasymforall}W X. W\isasymsubseteq{}X --> X\isasymsubseteq{}D --> h(W)\isasymsubseteq{}h(X))
-
-\tdx{lfp_def}:       lfp(D,h) == Inter({\ttlbrace}X \isasymin Pow(D). h(X) \isasymsubseteq X{\ttrbrace})
-\tdx{gfp_def}:       gfp(D,h) == Union({\ttlbrace}X \isasymin Pow(D). X \isasymsubseteq h(X){\ttrbrace})
-
-
-\tdx{lfp_lowerbound}: [| h(A) \isasymsubseteq A;  A \isasymsubseteq D |] ==> lfp(D,h) \isasymsubseteq A
-
-\tdx{lfp_subset}:    lfp(D,h) \isasymsubseteq D
-
-\tdx{lfp_greatest}:  [| bnd_mono(D,h);  
-                  !!X. [| h(X) \isasymsubseteq X;  X \isasymsubseteq D |] ==> A \isasymsubseteq X 
-               |] ==> A \isasymsubseteq lfp(D,h)
-
-\tdx{lfp_Tarski}:    bnd_mono(D,h) ==> lfp(D,h) = h(lfp(D,h))
-
-\tdx{induct}:        [| a \isasymin lfp(D,h);  bnd_mono(D,h);
-                  !!x. x \isasymin h(Collect(lfp(D,h),P)) ==> P(x)
-               |] ==> P(a)
-
-\tdx{lfp_mono}:      [| bnd_mono(D,h);  bnd_mono(E,i);
-                  !!X. X \isasymsubseteq D ==> h(X) \isasymsubseteq i(X)  
-               |] ==> lfp(D,h) \isasymsubseteq lfp(E,i)
-
-\tdx{gfp_upperbound}: [| A \isasymsubseteq h(A);  A \isasymsubseteq D |] ==> A \isasymsubseteq gfp(D,h)
-
-\tdx{gfp_subset}:    gfp(D,h) \isasymsubseteq D
-
-\tdx{gfp_least}:     [| bnd_mono(D,h);  
-                  !!X. [| X \isasymsubseteq h(X);  X \isasymsubseteq D |] ==> X \isasymsubseteq A
-               |] ==> gfp(D,h) \isasymsubseteq A
-
-\tdx{gfp_Tarski}:    bnd_mono(D,h) ==> gfp(D,h) = h(gfp(D,h))
-
-\tdx{coinduct}:      [| bnd_mono(D,h); a \isasymin X; X \isasymsubseteq h(X \isasymunion gfp(D,h)); X \isasymsubseteq D 
-               |] ==> a \isasymin gfp(D,h)
-
-\tdx{gfp_mono}:      [| bnd_mono(D,h);  D \isasymsubseteq E;
-                  !!X. X \isasymsubseteq D ==> h(X) \isasymsubseteq i(X)  
-               |] ==> gfp(D,h) \isasymsubseteq gfp(E,i)
-\end{alltt*}
-\caption{Least and greatest fixedpoints} \label{zf-fixedpt}
-\end{figure}
-
-
-\subsection{Least and greatest fixedpoints}
-
-The Knaster-Tarski Theorem states that every monotone function over a
-complete lattice has a fixedpoint.  Theory \thydx{Fixedpt} proves the
-Theorem only for a particular lattice, namely the lattice of subsets of a
-set (Fig.\ts\ref{zf-fixedpt}).  The theory defines least and greatest
-fixedpoint operators with corresponding induction and coinduction rules.
-These are essential to many definitions that follow, including the natural
-numbers and the transitive closure operator.  The (co)inductive definition
-package also uses the fixedpoint operators~\cite{paulson-CADE}.  See
-Davey and Priestley~\cite{davey-priestley} for more on the Knaster-Tarski
-Theorem and my paper~\cite{paulson-set-II} for discussion of the Isabelle
-proofs.
-
-Monotonicity properties are proved for most of the set-forming operations:
-union, intersection, Cartesian product, image, domain, range, etc.  These
-are useful for applying the Knaster-Tarski Fixedpoint Theorem.  The proofs
-themselves are trivial applications of Isabelle's classical reasoner. 
-
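-A typical monotonicity fact, and its typical one-line proof by the
-classical reasoner, might look like this:
-\begin{isabelle}
-\isacommand{lemma}\ "A \isasymsubseteq B ==> Pow(A) \isasymsubseteq Pow(B)"\isanewline
-\isacommand{by}\ blast
-\end{isabelle}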
-
-\subsection{Finite sets and lists}
-
-Theory \texttt{Finite} (Figure~\ref{zf-fin}) defines the finite set operator;
-$\isa{Fin}(A)$ is the set of all finite subsets of~$A$.  The theory employs
-Isabelle's inductive definition package, which proves various rules
-automatically.  The induction rule shown is stronger than the one proved by
-the package.  The theory also defines the set of all finite functions
-between two given sets.
-
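-For example, the induction rule \isa{Fin_induct} shown in the figure
-easily yields that every element of $\isa{Fin}(A)$ is a subset of~$A$;
-a script along these lines ought to succeed, with \isa{blast} disposing
-of both cases:
-\begin{isabelle}
-\isacommand{lemma}\ "b \isasymin Fin(A) ==> b \isasymsubseteq A"\isanewline
-\isacommand{apply}\ (erule Fin_induct)\isanewline
-\isacommand{apply}\ (blast+)\isanewline
-\isacommand{done}
-\end{isabelle}
-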
-\begin{figure}
-\begin{alltt*}\isastyleminor
-\tdx{Fin.emptyI}      0 \isasymin Fin(A)
-\tdx{Fin.consI}       [| a \isasymin A;  b \isasymin Fin(A) |] ==> cons(a,b) \isasymin Fin(A)
-
-\tdx{Fin_induct}
-    [| b \isasymin Fin(A);
-       P(0);
-       !!x y. [| x\isasymin{}A; y\isasymin{}Fin(A); x\isasymnotin{}y; P(y) |] ==> P(cons(x,y))
-    |] ==> P(b)
-
-\tdx{Fin_mono}:       A \isasymsubseteq B ==> Fin(A) \isasymsubseteq Fin(B)
-\tdx{Fin_UnI}:        [| b \isasymin Fin(A);  c \isasymin Fin(A) |] ==> b \isasymunion c \isasymin Fin(A)
-\tdx{Fin_UnionI}:     C \isasymin Fin(Fin(A)) ==> Union(C) \isasymin Fin(A)
-\tdx{Fin_subset}:     [| c \isasymsubseteq b;  b \isasymin Fin(A) |] ==> c \isasymin Fin(A)
-\end{alltt*}
-\caption{The finite set operator} \label{zf-fin}
-\end{figure}
-
-\begin{figure}
-\begin{constants}
-  \it symbol  & \it meta-type & \it priority & \it description \\ 
-  \cdx{list}    & $i\To i$      && lists over some set\\
-  \cdx{list_case} & $[i, [i,i]\To i, i] \To i$  && conditional for $list(A)$ \\
-  \cdx{map}     & $[i\To i, i] \To i$   &       & mapping functional\\
-  \cdx{length}  & $i\To i$              &       & length of a list\\
-  \cdx{rev}     & $i\To i$              &       & reverse of a list\\
-  \tt \at       & $[i,i]\To i$  &  Right 60     & append for lists\\
-  \cdx{flat}    & $i\To i$   &                  & append of list of lists
-\end{constants}
-
-\underscoreon %%because @ is used here
-\begin{alltt*}\isastyleminor
-\tdx{NilI}:       Nil \isasymin list(A)
-\tdx{ConsI}:      [| a \isasymin A;  l \isasymin list(A) |] ==> Cons(a,l) \isasymin list(A)
-
-\tdx{List.induct}
-    [| l \isasymin list(A);
-       P(Nil);
-       !!x y. [| x \isasymin A;  y \isasymin list(A);  P(y) |] ==> P(Cons(x,y))
-    |] ==> P(l)
-
-\tdx{Cons_iff}:       Cons(a,l)=Cons(a',l') <-> a=a' & l=l'
-\tdx{Nil_Cons_iff}:    Nil \isasymnoteq Cons(a,l)
-
-\tdx{list_mono}:      A \isasymsubseteq B ==> list(A) \isasymsubseteq list(B)
-
-\tdx{map_ident}:      l\isasymin{}list(A) ==> map(\%u. u, l) = l
-\tdx{map_compose}:    l\isasymin{}list(A) ==> map(h, map(j,l)) = map(\%u. h(j(u)), l)
-\tdx{map_app_distrib}: xs\isasymin{}list(A) ==> map(h, xs@ys) = map(h,xs)@map(h,ys)
-\tdx{map_type}
-    [| l\isasymin{}list(A); !!x. x\isasymin{}A ==> h(x)\isasymin{}B |] ==> map(h,l)\isasymin{}list(B)
-\tdx{map_flat}
-    ls \isasymin list(list(A)) ==> map(h, flat(ls)) = flat(map(map(h),ls))
-\end{alltt*}
-\caption{Lists} \label{zf-list}
-\end{figure}
-
-
-Figure~\ref{zf-list} presents the set of lists over~$A$, $\isa{list}(A)$.  The
-definition employs Isabelle's datatype package, which defines the introduction
-and induction rules automatically, as well as the constructors, case operator
-(\isa{list\_case}) and recursion operator.  The theory then defines the usual
-list functions by primitive recursion.  See theory \texttt{List}.
-
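-For example, structural induction over lists combines well with
-simplification, assuming the primitive recursion equations for \isa{map}
-and \isa{length} are installed as rewrite rules.  A script along these
-lines should show that \isa{map} preserves length:
-\begin{isabelle}
-\isacommand{lemma}\ "l \isasymin list(A) ==> length(map(h,l)) = length(l)"\isanewline
-\isacommand{apply}\ (erule List.induct)\isanewline
-\isacommand{apply}\ simp_all\isanewline
-\isacommand{done}
-\end{isabelle}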
-
-\subsection{Miscellaneous}
-
-\begin{figure}
-\begin{constants} 
-  \it symbol  & \it meta-type & \it priority & \it description \\ 
-  \sdx{O}       & $[i,i]\To i$  &  Right 60     & composition ($\circ$) \\
-  \cdx{id}      & $i\To i$      &       & identity function \\
-  \cdx{inj}     & $[i,i]\To i$  &       & injective function space\\
-  \cdx{surj}    & $[i,i]\To i$  &       & surjective function space\\
-  \cdx{bij}     & $[i,i]\To i$  &       & bijective function space
-\end{constants}
-
-\begin{alltt*}\isastyleminor
-\tdx{comp_def}: r O s     == {\ttlbrace}xz \isasymin domain(s)*range(r) . 
-                        {\isasymexists}x y z. xz=<x,z> & <x,y> \isasymin s & <y,z> \isasymin r{\ttrbrace}
-\tdx{id_def}:   id(A)     == (lam x \isasymin A. x)
-\tdx{inj_def}:  inj(A,B)  == {\ttlbrace} f\isasymin{}A->B. {\isasymforall}w\isasymin{}A. {\isasymforall}x\isasymin{}A. f`w=f`x --> w=x {\ttrbrace}
-\tdx{surj_def}: surj(A,B) == {\ttlbrace} f\isasymin{}A->B . {\isasymforall}y\isasymin{}B. {\isasymexists}x\isasymin{}A. f`x=y {\ttrbrace}
-\tdx{bij_def}:  bij(A,B)  == inj(A,B) \isasyminter surj(A,B)
-
-
-\tdx{left_inverse}:    [| f\isasymin{}inj(A,B);  a\isasymin{}A |] ==> converse(f)`(f`a) = a
-\tdx{right_inverse}:   [| f\isasymin{}inj(A,B);  b\isasymin{}range(f) |] ==> 
-                 f`(converse(f)`b) = b
-
-\tdx{inj_converse_inj}: f\isasymin{}inj(A,B) ==> converse(f) \isasymin inj(range(f),A)
-\tdx{bij_converse_bij}: f\isasymin{}bij(A,B) ==> converse(f) \isasymin bij(B,A)
-
-\tdx{comp_type}:     [| s \isasymsubseteq A*B;  r \isasymsubseteq B*C |] ==> (r O s) \isasymsubseteq A*C
-\tdx{comp_assoc}:    (r O s) O t = r O (s O t)
-
-\tdx{left_comp_id}:  r \isasymsubseteq A*B ==> id(B) O r = r
-\tdx{right_comp_id}: r \isasymsubseteq A*B ==> r O id(A) = r
-
-\tdx{comp_func}:     [| g\isasymin{}A->B; f\isasymin{}B->C |] ==> (f O g) \isasymin A->C
-\tdx{comp_func_apply}: [| g\isasymin{}A->B; f\isasymin{}B->C; a\isasymin{}A |] ==> (f O g)`a = f`(g`a)
-
-\tdx{comp_inj}:      [| g\isasymin{}inj(A,B);  f\isasymin{}inj(B,C)  |] ==> (f O g)\isasymin{}inj(A,C)
-\tdx{comp_surj}:     [| g\isasymin{}surj(A,B); f\isasymin{}surj(B,C) |] ==> (f O g)\isasymin{}surj(A,C)
-\tdx{comp_bij}:      [| g\isasymin{}bij(A,B); f\isasymin{}bij(B,C) |] ==> (f O g)\isasymin{}bij(A,C)
-
-\tdx{left_comp_inverse}:    f\isasymin{}inj(A,B) ==> converse(f) O f = id(A)
-\tdx{right_comp_inverse}:   f\isasymin{}surj(A,B) ==> f O converse(f) = id(B)
-
-\tdx{bij_disjoint_Un}:  
-    [| f\isasymin{}bij(A,B);  g\isasymin{}bij(C,D);  A \isasyminter C = 0;  B \isasyminter D = 0 |] ==> 
-    (f \isasymunion g)\isasymin{}bij(A \isasymunion C, B \isasymunion D)
-
-\tdx{restrict_bij}: [| f\isasymin{}inj(A,B); C\isasymsubseteq{}A |] ==> restrict(f,C)\isasymin{}bij(C, f``C)
-\end{alltt*}
-\caption{Permutations} \label{zf-perm}
-\end{figure}
-
-The theory \thydx{Perm} is concerned with permutations (bijections) and
-related concepts.  These include composition of relations, the identity
-relation, and three specialized function spaces: injective, surjective and
-bijective.  Figure~\ref{zf-perm} displays many of their properties that
-have been proved.  These results are fundamental to a treatment of
-equipollence and cardinality.
-
-Theory \thydx{Univ} defines a `universe' $\isa{univ}(A)$, which is used by
-the datatype package.  This set contains $A$ and the
-natural numbers.  Vitally, it is closed under finite products: 
-$\isa{univ}(A)\times\isa{univ}(A)\subseteq\isa{univ}(A)$.  This theory also
-defines the cumulative hierarchy of axiomatic set theory, which
-traditionally is written $V@\alpha$ for an ordinal~$\alpha$.  The
-`universe' is a simple generalization of~$V@\omega$.
-
-Theory \thydx{QUniv} defines a `universe' $\isa{quniv}(A)$, which is used by
-the datatype package to construct codatatypes such as streams.  It is
-analogous to $\isa{univ}(A)$ (and is defined in terms of it) but is closed
-under the non-standard product and sum.
-
-
-\section{Automatic Tools}
-
-ZF provides the simplifier and the classical reasoner.  Moreover it supplies a
-specialized tool to infer `types' of terms.
-
-\subsection{Simplification and Classical Reasoning}
-
-ZF inherits simplification from FOL but adapts it for set theory.  The
-extraction of rewrite rules takes the ZF primitives into account.  It can
-strip bounded universal quantifiers from a formula; for example, ${\forall
-  x\in A. f(x)=g(x)}$ yields the conditional rewrite rule $x\in A \Imp
-f(x)=g(x)$.  Given $a\in\{x\in A. P(x)\}$ it extracts rewrite rules from $a\in
-A$ and~$P(a)$.  It can also break down $a\in A\int B$ and $a\in A-B$.
-
-The default simpset used by \isa{simp} contains congruence rules for all of ZF's
-binding operators.  It contains all the conversion rules, such as those
-for \isa{fst} and \isa{snd}, as well as the rewrites shown in
-Fig.\ts\ref{zf-simpdata}.
-
-Classical reasoner methods such as \isa{blast} and \isa{auto} refer to
-a rich collection of built-in axioms for all the set-theoretic
-primitives.
-
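-For example, the \isa{Pow} equation promised in the earlier subsection on
-subset reasoning should be a one-step proof for \isa{blast}:
-\begin{isabelle}
-\isacommand{lemma}\ "Pow(A) \isasyminter Pow(B) = Pow(A \isasyminter B)"\isanewline
-\isacommand{by}\ blast
-\end{isabelle}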
-
-\begin{figure}
-\begin{eqnarray*}
-  a\in \emptyset        & \bimp &  \bot\\
-  a \in A \un B      & \bimp &  a\in A \disj a\in B\\
-  a \in A \int B      & \bimp &  a\in A \conj a\in B\\
-  a \in A-B             & \bimp &  a\in A \conj \lnot (a\in B)\\
-  \pair{a,b}\in \isa{Sigma}(A,B)
-                        & \bimp &  a\in A \conj b\in B(a)\\
-  a \in \isa{Collect}(A,P)      & \bimp &  a\in A \conj P(a)\\
-  (\forall x \in \emptyset. P(x)) & \bimp &  \top\\
-  (\forall x \in A. \top)       & \bimp &  \top
-\end{eqnarray*}
-\caption{Some rewrite rules for set theory} \label{zf-simpdata}
-\end{figure}
-
-
-\subsection{Type-Checking Tactics}
-\index{type-checking tactics}
-
-Isabelle/ZF provides simple tactics to help automate those proofs that are
-essentially type-checking.  Such proofs are built by applying rules such as
-these:
-\begin{ttbox}\isastyleminor
-[| ?P ==> ?a \isasymin ?A; ~?P ==> ?b \isasymin ?A |] 
-==> (if ?P then ?a else ?b) \isasymin ?A
-
-[| ?m \isasymin nat; ?n \isasymin nat |] ==> ?m #+ ?n \isasymin nat
-
-?a \isasymin ?A ==> Inl(?a) \isasymin ?A + ?B  
-\end{ttbox}
-In typical applications, the goal has the form $t\in\Var{A}$: in other words,
-we have a specific term~$t$ and need to infer its `type' by instantiating the
-set variable~$\Var{A}$.  Neither the simplifier nor the classical reasoner
-does this job well.  The if-then-else rule, and many similar ones, can make
-the classical reasoner loop.  The simplifier refuses (on principle) to
-instantiate variables during rewriting, so goals such as \isa{i\#+j \isasymin \ ?A}
-are left unsolved.
-
-The simplifier calls the type-checker to solve rewritten subgoals: this stage
-can indeed instantiate variables.  If you have defined new constants and
-proved type-checking rules for them, then declare the rules using
-the attribute \isa{TC} and the rest should be automatic.  In
-particular, the simplifier will use type-checking to help satisfy
-conditional rewrite rules. Call the method \ttindex{typecheck} to
-break down all subgoals using type-checking rules. You can add new
-type-checking rules temporarily like this:
-\begin{isabelle}
-\isacommand{apply}\ (typecheck add:\ inj_is_fun)
-\end{isabelle}
-
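-Assuming that \isa{typecheck} can also use the assumptions of the goal, a
-typical type-checking goal should be solved outright:
-\begin{isabelle}
-\isacommand{lemma}\ "[| m \isasymin nat;  n \isasymin nat |] ==> m \#+ n \#* m \isasymin nat"\isanewline
-\isacommand{by}\ typecheck
-\end{isabelle}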
-
-%Though the easiest way to invoke the type-checker is via the simplifier,
-%specialized applications may require more detailed knowledge of
-%the type-checking primitives.  They are modelled on the simplifier's:
-%\begin{ttdescription}
-%\item[\ttindexbold{tcset}] is the type of tcsets: sets of type-checking rules.
-%
-%\item[\ttindexbold{addTCs}] is an infix operator to add type-checking rules to
-%  a tcset.
-%  
-%\item[\ttindexbold{delTCs}] is an infix operator to remove type-checking rules
-%  from a tcset.
-%
-%\item[\ttindexbold{typecheck_tac}] is a tactic for attempting to prove all
-%  subgoals using the rules given in its argument, a tcset.
-%\end{ttdescription}
-%
-%Tcsets, like simpsets, are associated with theories and are merged when
-%theories are merged.  There are further primitives that use the default tcset.
-%\begin{ttdescription}
-%\item[\ttindexbold{tcset}] is a function to return the default tcset; use the
-%  expression \isa{tcset()}.
-%
-%\item[\ttindexbold{AddTCs}] adds type-checking rules to the default tcset.
-%  
-%\item[\ttindexbold{DelTCs}] removes type-checking rules from the default
-%  tcset.
-%
-%\item[\ttindexbold{Typecheck_tac}] calls \isa{typecheck_tac} using the
-%  default tcset.
-%\end{ttdescription}
-%
-%To supply some type-checking rules temporarily, using \isa{Addrules} and
-%later \isa{Delrules} is the simplest way.  There is also a high-tech
-%approach.  Call the simplifier with a new solver expressed using
-%\ttindexbold{type_solver_tac} and your temporary type-checking rules.
-%\begin{ttbox}\isastyleminor
-%by (asm_simp_tac 
-%     (simpset() setSolver type_solver_tac (tcset() addTCs prems)) 2);
-%\end{ttbox}
-
-
-\section{Natural number and integer arithmetic}
-
-\index{arithmetic|(}
-
-\begin{figure}\small
-\index{#*@{\tt\#*} symbol}
-\index{*div symbol}
-\index{*mod symbol}
-\index{#+@{\tt\#+} symbol}
-\index{#-@{\tt\#-} symbol}
-\begin{constants}
-  \it symbol  & \it meta-type & \it priority & \it description \\ 
-  \cdx{nat}     & $i$                   &       & set of natural numbers \\
-  \cdx{nat_case}& $[i,i\To i,i]\To i$     &     & conditional for $nat$\\
-  \tt \#*       & $[i,i]\To i$  &  Left 70      & multiplication \\
-  \tt div       & $[i,i]\To i$  &  Left 70      & division\\
-  \tt mod       & $[i,i]\To i$  &  Left 70      & modulus\\
-  \tt \#+       & $[i,i]\To i$  &  Left 65      & addition\\
-  \tt \#-       & $[i,i]\To i$  &  Left 65      & subtraction
-\end{constants}
-
-\begin{alltt*}\isastyleminor
-\tdx{nat_def}: nat == lfp(lam r \isasymin Pow(Inf). {\ttlbrace}0{\ttrbrace} \isasymunion {\ttlbrace}succ(x). x \isasymin r{\ttrbrace})
-
-\tdx{nat_case_def}:  nat_case(a,b,k) == 
-              THE y. k=0 & y=a | ({\isasymexists}x. k=succ(x) & y=b(x))
-
-\tdx{nat_0I}:           0 \isasymin nat
-\tdx{nat_succI}:        n \isasymin nat ==> succ(n) \isasymin nat
-
-\tdx{nat_induct}:        
-    [| n \isasymin nat;  P(0);  !!x. [| x \isasymin nat;  P(x) |] ==> P(succ(x)) 
-    |] ==> P(n)
-
-\tdx{nat_case_0}:       nat_case(a,b,0) = a
-\tdx{nat_case_succ}:    nat_case(a,b,succ(m)) = b(m)
-
-\tdx{add_0_natify}:     0 #+ n = natify(n)
-\tdx{add_succ}:         succ(m) #+ n = succ(m #+ n)
-
-\tdx{mult_type}:        m #* n \isasymin nat
-\tdx{mult_0}:           0 #* n = 0
-\tdx{mult_succ}:        succ(m) #* n = n #+ (m #* n)
-\tdx{mult_commute}:     m #* n = n #* m
-\tdx{add_mult_dist}:    (m #+ n) #* k = (m #* k) #+ (n #* k)
-\tdx{mult_assoc}:       (m #* n) #* k = m #* (n #* k)
-\tdx{mod_div_equality}: m \isasymin nat ==> (m div n)#*n #+ m mod n = m
-\end{alltt*}
-\caption{The natural numbers} \label{zf-nat}
-\end{figure}
-
-\index{natural numbers}
-
-Theory \thydx{Nat} defines the natural numbers and mathematical
-induction, along with a case analysis operator.  The set of natural
-numbers, here called \isa{nat}, is known in set theory as the ordinal~$\omega$.
-
-Theory \thydx{Arith} develops arithmetic on the natural numbers
-(Fig.\ts\ref{zf-nat}).  Addition, multiplication and subtraction are defined
-by primitive recursion.  Division and remainder are defined by repeated
-subtraction, which requires well-founded recursion; the termination argument
-relies on the divisor's being non-zero.  Many properties are proved:
-commutative, associative and distributive laws, identity and cancellation
-laws, etc.  The most interesting result is perhaps the theorem $a \bmod b +
-(a/b)\times b = a$.
-
-To minimize the need for tedious proofs of $t\in\isa{nat}$, the arithmetic
-operators coerce their arguments to be natural numbers.  The function
-\cdx{natify} is defined such that $\isa{natify}(n) = n$ if $n$ is a natural
-number, $\isa{natify}(\isa{succ}(x)) =
-\isa{succ}(\isa{natify}(x))$ for all $x$, and finally
-$\isa{natify}(x)=0$ in all other cases.  The benefit is that the addition,
-subtraction, multiplication, division and remainder operators always return
-natural numbers, regardless of their arguments.  Algebraic laws (commutative,
-associative, distributive) are unconditional.  Occurrences of \isa{natify}
-as operands of those operators are simplified away.  Any remaining occurrences
-can either be tolerated or else eliminated by proving that the argument is a
-natural number.
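-
-For instance, the commutativity of addition can be cited with no typing
-premises at all.  The following one-line proof is only a sketch: it assumes
-that the library states this law under the name \isa{add\_commute}.
-\begin{isabelle}
-\isacommand{lemma}\ "m\ \#+\ n\ =\ n\ \#+\ m"\isanewline
-\ \ \isacommand{by}\ (rule\ add\_commute)
-\end{isabelle}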
-
-The simplifier automatically cancels common terms on the opposite sides of
-subtraction and of relations ($=$, $<$ and $\le$).  Here is an example:
-\begin{isabelle}
- 1. i \#+ j \#+ k \#- j < k \#+ l\isanewline
-\isacommand{apply}\ simp\isanewline
- 1. natify(i) < natify(l)
-\end{isabelle}
-Given the assumptions \isa{i \isasymin nat} and \isa{l \isasymin nat}, both occurrences of
-\cdx{natify} would be simplified away.
-
-
-\begin{figure}\small
-\index{$*@{\tt\$*} symbol}
-\index{$+@{\tt\$+} symbol}
-\index{$-@{\tt\$-} symbol}
-\begin{constants}
-  \it symbol  & \it meta-type & \it priority & \it description \\ 
-  \cdx{int}     & $i$                   &       & set of integers \\
-  \tt \$*       & $[i,i]\To i$  &  Left 70      & multiplication \\
-  \tt \$+       & $[i,i]\To i$  &  Left 65      & addition\\
-  \tt \$-       & $[i,i]\To i$  &  Left 65      & subtraction\\
-  \tt \$<       & $[i,i]\To o$  &  Left 50      & $<$ on integers\\
-  \tt \$<=      & $[i,i]\To o$  &  Left 50      & $\le$ on integers
-\end{constants}
-
-\begin{alltt*}\isastyleminor
-\tdx{zadd_0_intify}:    0 $+ n = intify(n)
-
-\tdx{zmult_type}:       m $* n \isasymin int
-\tdx{zmult_0}:          0 $* n = 0
-\tdx{zmult_commute}:    m $* n = n $* m
-\tdx{zadd_zmult_dist}:   (m $+ n) $* k = (m $* k) $+ (n $* k)
-\tdx{zmult_assoc}:      (m $* n) $* k = m $* (n $* k)
-\end{alltt*}
-\caption{The integers} \label{zf-int}
-\end{figure}
-
-
-\index{integers}
-
-Theory \thydx{Int} defines the integers, as equivalence classes of natural
-numbers.   Figure~\ref{zf-int} presents a tidy collection of laws.  In
-fact, a large library of facts is proved, including monotonicity laws for
-addition and multiplication, covering both positive and negative operands.  
-
-As with the natural numbers, the need for typing proofs is minimized.  All the
-operators defined in Fig.\ts\ref{zf-int} coerce their operands to integers by
-applying the function \cdx{intify}.  This function is the identity on integers
-and maps other operands to zero.
-
-Decimal notation is provided for the integers.  Numbers, written as
-\isa{\#$nnn$} or \isa{\#-$nnn$}, are represented internally in
-two's-complement binary.  Expressions involving addition, subtraction and
-multiplication of numeral constants are evaluated (with acceptable efficiency)
-by simplification.  The simplifier also collects similar terms, multiplying
-them by a numerical coefficient.  It also cancels occurrences of the same
-terms on the other side of the relational operators.  Example:
-\begin{isabelle}
- 1. y \$+ z \$+ \#-3 \$* x \$+ y \$<=  x \$* \#2 \$+
-z\isanewline
-\isacommand{apply}\ simp\isanewline
- 1. \#2 \$* y \$<= \#5 \$* x
-\end{isabelle}
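-Numeral evaluation can be checked on a closed instance; the following
-one-line proof is a sketch, assuming that the default simpset evaluates
-numeral arithmetic as just described:
-\begin{isabelle}
-\isacommand{lemma}\ "\#3\ \$*\ \#4\ \$+\ \#2\ =\ \#14"\isanewline
-\ \ \isacommand{by}\ simp
-\end{isabelle}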
-For more information on the integers, please see the theories in directory
-\texttt{ZF/Integ}. 
-
-\index{arithmetic|)}
-
-
-\section{Datatype definitions}
-\label{sec:ZF:datatype}
-\index{*datatype|(}
-
-The \ttindex{datatype} definition package of ZF constructs inductive datatypes
-similar to \ML's.  It can also construct coinductive datatypes
-(codatatypes), which are non-well-founded structures such as streams.  It
-defines the set using a fixed-point construction and proves induction rules,
-as well as theorems for recursion and case combinators.  It supplies
-mechanisms for reasoning about freeness.  The datatype package can handle both
-mutual and indirect recursion.
-
-
-\subsection{Basics}
-\label{subsec:datatype:basics}
-
-A \isa{datatype} definition has the following form:
-\[
-\begin{array}{llcl}
-\mathtt{datatype} & t@1(A@1,\ldots,A@h) & = &
-  constructor^1@1 ~\mid~ \ldots ~\mid~ constructor^1@{k@1} \\
- & & \vdots \\
-\mathtt{and} & t@n(A@1,\ldots,A@h) & = &
-  constructor^n@1~ ~\mid~ \ldots ~\mid~ constructor^n@{k@n}
-\end{array}
-\]
-Here $t@1$, \ldots,~$t@n$ are identifiers and $A@1$, \ldots,~$A@h$ are
-variables: the datatype's parameters.  Each constructor specification has the
-form \dquotesoff
-\[ C \hbox{\tt~( } \hbox{\tt"} x@1 \hbox{\tt:} T@1 \hbox{\tt"},\;
-                   \ldots,\;
-                   \hbox{\tt"} x@m \hbox{\tt:} T@m \hbox{\tt"}
-     \hbox{\tt~)}
-\]
-Here $C$ is the constructor name, and variables $x@1$, \ldots,~$x@m$ are the
-constructor arguments, belonging to the sets $T@1$, \ldots, $T@m$,
-respectively.  Typically each $T@j$ is either a constant set, a datatype
-parameter (one of $A@1$, \ldots, $A@h$) or a recursive occurrence of one of
-the datatypes, say $t@i(A@1,\ldots,A@h)$.  More complex possibilities exist,
-but they are much harder to realize.  Often, additional information must be
-supplied in the form of theorems.
-
-A datatype can occur recursively as the argument of some function~$F$.  This
-is called a {\em nested} (or \emph{indirect}) occurrence.  It is only allowed
-if the datatype package is given a theorem asserting that $F$ is monotonic.
-If the datatype has indirect occurrences, then Isabelle/ZF does not support
-recursive function definitions.
-
-A simple example of a datatype is \isa{list}, which is built-in, and is
-defined by
-\begin{alltt*}\isastyleminor
-consts     list :: "i=>i"
-datatype  "list(A)" = Nil | Cons ("a \isasymin A", "l \isasymin list(A)")
-\end{alltt*}
-Note that the datatype operator must be declared as a constant first.
-However, the package declares the constructors.  Here, \isa{Nil} gets type
-$i$ and \isa{Cons} gets type $[i,i]\To i$.
-
-Trees and forests can be modelled by the mutually recursive datatype
-definition
-\begin{alltt*}\isastyleminor
-consts   
-  tree :: "i=>i"
-  forest :: "i=>i"
-  tree_forest :: "i=>i"
-datatype  "tree(A)"   = Tcons ("a{\isasymin}A",  "f{\isasymin}forest(A)")
-and "forest(A)" = Fnil | Fcons ("t{\isasymin}tree(A)",  "f{\isasymin}forest(A)")
-\end{alltt*}
-Here $\isa{tree}(A)$ is the set of trees over $A$, $\isa{forest}(A)$ is
-the set of forests over $A$, and  $\isa{tree_forest}(A)$ is the union of
-the previous two sets.  All three operators must be declared first.
-
-The datatype \isa{term}, which is defined by
-\begin{alltt*}\isastyleminor
-consts     term :: "i=>i"
-datatype  "term(A)" = Apply ("a \isasymin A", "l \isasymin list(term(A))")
-  monos list_mono
-  type_elims list_univ [THEN subsetD, elim_format]
-\end{alltt*}
-is an example of nested recursion.  (The theorem \isa{list_mono} is proved
-in theory \isa{List}, and the \isa{term} example is developed in
-theory
-\thydx{Induct/Term}.)
-
-\subsubsection{Freeness of the constructors}
-
-Constructors satisfy {\em freeness} properties: they are distinct,
-for example $\isa{Nil}\not=\isa{Cons}(a,l)$, and they are injective, for
-example $\isa{Cons}(a,l)=\isa{Cons}(a',l') \bimp a=a' \conj l=l'$.
-Because the number of freeness properties is quadratic in the number of constructors, the
-datatype package does not prove them.  Instead, it ensures that simplification
-will prove them dynamically: when the simplifier encounters a formula
-asserting the equality of two datatype constructors, it performs freeness
-reasoning.  
-
-Freeness reasoning can also be done using the classical reasoner, but it is
-more complicated.  You have to add some safe elimination rules to the
-claset.  For the \isa{list} datatype, they are called
-\isa{list.free_elims}.  Occasionally this exposes the underlying
-representation of some constructor, which can be rectified using the command
-\isa{unfold list.con_defs [symmetric]}.
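-
-For example, a typical freeness goal for lists should be provable by either
-approach; the sketch below relies only on the facts just described:
-\begin{isabelle}
-\isacommand{lemma}\ "Cons(a,\ l)\ \isasymnoteq \ Nil"\isanewline
-\ \ \isacommand{by}\ simp
-\end{isabelle}
-Replacing \isa{simp} by \isa{(blast\ elim!:\ list.free\_elims)} should work
-equally well.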
-
-
-\subsubsection{Structural induction}
-
-The datatype package also provides structural induction rules.  For datatypes
-without mutual or nested recursion, the rule has the form exemplified by
-\isa{list.induct} in Fig.\ts\ref{zf-list}.  For mutually recursive
-datatypes, the induction rule is supplied in two forms.  Consider datatype
-\isa{TF}.  The rule \isa{tree_forest.induct} performs induction over a
-single predicate~\isa{P}, which is presumed to be defined for both trees
-and forests:
-\begin{alltt*}\isastyleminor
-[| x \isasymin tree_forest(A);
-   !!a f. [| a \isasymin A; f \isasymin forest(A); P(f) |] ==> P(Tcons(a, f)); 
-   P(Fnil);
-   !!f t. [| t \isasymin tree(A); P(t); f \isasymin forest(A); P(f) |]
-          ==> P(Fcons(t, f)) 
-|] ==> P(x)
-\end{alltt*}
-The rule \isa{tree_forest.mutual_induct} performs induction over two
-distinct predicates, \isa{P_tree} and \isa{P_forest}.
-\begin{alltt*}\isastyleminor
-[| !!a f.
-      [| a{\isasymin}A; f{\isasymin}forest(A); P_forest(f) |] ==> P_tree(Tcons(a,f));
-   P_forest(Fnil);
-   !!f t. [| t{\isasymin}tree(A); P_tree(t); f{\isasymin}forest(A); P_forest(f) |]
-          ==> P_forest(Fcons(t, f)) 
-|] ==> ({\isasymforall}za. za \isasymin tree(A) --> P_tree(za)) &
-    ({\isasymforall}za. za \isasymin forest(A) --> P_forest(za))
-\end{alltt*}
-
-For datatypes with nested recursion, such as the \isa{term} example from
-above, things are a bit more complicated.  The rule \isa{term.induct}
-refers to the monotonic operator, \isa{list}:
-\begin{alltt*}\isastyleminor
-[| x \isasymin term(A);
-   !!a l. [| a\isasymin{}A; l\isasymin{}list(Collect(term(A), P)) |] ==> P(Apply(a,l)) 
-|] ==> P(x)
-\end{alltt*}
-The theory \isa{Induct/Term.thy} derives two higher-level induction rules,
-one of which is particularly useful for proving equations:
-\begin{alltt*}\isastyleminor
-[| t \isasymin term(A);
-   !!x zs. [| x \isasymin A; zs \isasymin list(term(A)); map(f, zs) = map(g, zs) |]
-           ==> f(Apply(x, zs)) = g(Apply(x, zs)) 
-|] ==> f(t) = g(t)  
-\end{alltt*}
-How this can be generalized to other nested datatypes is a matter for future
-research.
-
-
-\subsubsection{The \isa{case} operator}
-
-The package defines an operator for performing case analysis over the
-datatype.  For \isa{list}, it is called \isa{list_case} and satisfies
-the equations
-\begin{ttbox}\isastyleminor
-list_case(f_Nil, f_Cons, []) = f_Nil
-list_case(f_Nil, f_Cons, Cons(a, l)) = f_Cons(a, l)
-\end{ttbox}
-Here \isa{f_Nil} is the value to return if the argument is \isa{Nil} and
-\isa{f_Cons} is a function that computes the value to return if the
-argument has the form $\isa{Cons}(a,l)$.  The function can be expressed as
-an abstraction, over patterns if desired (\S\ref{sec:pairs}).
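-For example, the following goal should be provable by simplification alone,
-assuming (as described below) that the case equations are installed as
-default rewrite rules; the function argument is written as a meta-level
-abstraction:
-\begin{isabelle}
-\isacommand{lemma}\ "list\_case(0,\ (\%a\ l.\ a),\ Cons(x,\ Nil))\ =\ x"\isanewline
-\ \ \isacommand{by}\ simp
-\end{isabelle}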
-
-For mutually recursive datatypes, there is a single \isa{case} operator.
-In the tree/forest example, the constant \isa{tree_forest_case} handles all
-of the constructors of the two datatypes.
-
-
-\subsection{Defining datatypes}
-
-The theory syntax for datatype definitions is shown in the
-Isabelle/Isar reference manual.  In order to be well-formed, a
-datatype definition has to obey the rules stated in the previous
-section.  As a result the theory is extended with the new types, the
-constructors, and the theorems listed in the previous section.
-
-Codatatypes are declared like datatypes and are identical to them in every
-respect except that they have a coinduction rule instead of an induction rule.
-Note that while an induction rule has the effect of limiting the values
-contained in the set, a coinduction rule gives a way of constructing new
-values of the set.
-
-Most of the theorems about datatypes become part of the default simpset.  You
-never need to see them again because the simplifier applies them
-automatically.  
-
-\subsubsection{Specialized methods for datatypes}
-
-Induction and case-analysis can be invoked using these special-purpose
-methods:
-\begin{ttdescription}
-\item[\methdx{induct_tac} $x$] applies structural
-  induction on variable $x$ to subgoal~1, provided the type of $x$ is a
-  datatype.  The induction variable should not occur among other assumptions
-  of the subgoal.
-\end{ttdescription}
-% 
-% we also have the ind_cases method, but what does it do?
-In some situations, induction is overkill and a case distinction over all
-constructors of the datatype suffices.
-\begin{ttdescription}
-\item[\methdx{case_tac} $x$]
- performs a case analysis for the variable~$x$.
-\end{ttdescription}
-
-Both tactics can be applied only to a variable whose typing is given in
-some assumption, for example \isa{x \isasymin \ list(A)}.  The tactics
-also work for the natural numbers (\isa{nat}) and disjoint sums, although
-these sets were not defined using the datatype package.  (Disjoint sums are
-not recursive, so only \isa{case_tac} is available.)
-
-Structured Isar methods are also available. Below, $t$ 
-stands for the name of the datatype.
-\begin{ttdescription}
-\item[\methdx{induct} \isa{set:}\ $t$] is the Isar induction tactic.
-\item[\methdx{cases} \isa{set:}\ $t$] is the Isar case-analysis tactic.
-\end{ttdescription}
-
-
-\subsubsection{The theorems proved by a datatype declaration}
-
-Here are some more details for the technically minded.  Processing the
-datatype declaration of a set~$t$ produces a name space~$t$ containing
-the following theorems:
-\begin{ttbox}\isastyleminor
-intros          \textrm{the introduction rules}
-cases           \textrm{the case analysis rule}
-induct          \textrm{the standard induction rule}
-mutual_induct   \textrm{the mutual induction rule, if needed}
-case_eqns       \textrm{equations for the case operator}
-recursor_eqns   \textrm{equations for the recursor}
-simps           \textrm{the union of} case_eqns \textrm{and} recursor_eqns
-con_defs        \textrm{definitions of the case operator and constructors}
-free_iffs       \textrm{logical equivalences for proving freeness}
-free_elims      \textrm{elimination rules for proving freeness}
-defs            \textrm{datatype definition(s)}
-\end{ttbox}
-Furthermore there is the theorem $C$ for every constructor~$C$; for
-example, the \isa{list} datatype's introduction rules are bound to the
-identifiers \isa{Nil} and \isa{Cons}.
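-For instance, the introduction rules suffice for simple typing facts; the
-following sketch supplies \isa{list.intros} to the classical reasoner:
-\begin{isabelle}
-\isacommand{lemma}\ "a\ \isasymin \ A\ ==>\ Cons(a,\ Nil)\ \isasymin \ list(A)"\isanewline
-\ \ \isacommand{by}\ (blast\ intro:\ list.intros)
-\end{isabelle}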
-
-For a codatatype, the component \isa{coinduct} is the coinduction rule,
-replacing the \isa{induct} component.
-
-See the theories \isa{Induct/Ntree} and \isa{Induct/Brouwer} for examples of
-infinitely branching datatypes.  See theory \isa{Induct/LList} for an example
-of a codatatype.  Some of these theories illustrate the use of additional,
-undocumented features of the datatype package.  Datatype definitions are
-reduced to inductive definitions, and the advanced features should be
-understood in that light.
-
-
-\subsection{Examples}
-
-\subsubsection{The datatype of binary trees}
-
-Let us define the set $\isa{bt}(A)$ of binary trees over~$A$.  The theory
-must contain these lines:
-\begin{alltt*}\isastyleminor
-consts   bt :: "i=>i"
-datatype "bt(A)" = Lf | Br ("a\isasymin{}A", "t1\isasymin{}bt(A)", "t2\isasymin{}bt(A)")
-\end{alltt*}
-After loading the theory, we can prove some theorems.
-We begin by declaring the constructors' typechecking rules
-as simplification rules:
-\begin{isabelle}
-\isacommand{declare}\ bt.intros\ [simp]%
-\end{isabelle}
-
-Our first example is the theorem that no tree equals its
-left branch.  To make the inductive hypothesis strong enough, 
-the proof requires a quantified induction formula, but 
-the \isa{rule\_format} attribute will remove the quantifiers 
-before the theorem is stored.
-\begin{isabelle}
-\isacommand{lemma}\ Br\_neq\_left\ [rule\_format]:\ "l\isasymin bt(A)\ ==>\ \isasymforall x\ r.\ Br(x,l,r)\isasymnoteq{}l"\isanewline
-\ 1.\ l\ \isasymin \ bt(A)\ \isasymLongrightarrow \ \isasymforall x\ r.\ Br(x,\ l,\ r)\ \isasymnoteq \ l%
-\end{isabelle}
-This can be proved by the structural induction tactic:
-\begin{isabelle}
-\ \ \isacommand{apply}\ (induct\_tac\ l)\isanewline
-\ 1.\ \isasymforall x\ r.\ Br(x,\ Lf,\ r)\ \isasymnoteq \ Lf\isanewline
-\ 2.\ \isasymAnd a\ t1\ t2.\isanewline
-\isaindent{\ 2.\ \ \ \ }\isasymlbrakk a\ \isasymin \ A;\ t1\ \isasymin \ bt(A);\ \isasymforall x\ r.\ Br(x,\ t1,\ r)\ \isasymnoteq \ t1;\ t2\ \isasymin \ bt(A);\isanewline
-\isaindent{\ 2.\ \ \ \ \ \ \ }\isasymforall x\ r.\ Br(x,\ t2,\ r)\ \isasymnoteq \ t2\isasymrbrakk \isanewline
-\isaindent{\ 2.\ \ \ \ }\isasymLongrightarrow \ \isasymforall x\ r.\ Br(x,\ Br(a,\ t1,\ t2),\ r)\ \isasymnoteq \ Br(a,\ t1,\ t2)
-\end{isabelle}
-Both subgoals are proved using \isa{auto}, which performs the necessary
-freeness reasoning. 
-\begin{isabelle}
-\ \ \isacommand{apply}\ auto\isanewline
-No\ subgoals!\isanewline
-\isacommand{done}
-\end{isabelle}
-
-An alternative proof uses Isar's fancy \isa{induct} method, which 
-automatically quantifies over all free variables:
-
-\begin{isabelle}
-\isacommand{lemma}\ Br\_neq\_left':\ "l\ \isasymin \ bt(A)\ ==>\ (!!x\ r.\ Br(x,\ l,\ r)\ \isasymnoteq \ l)"\isanewline
-\ \ \isacommand{apply}\ (induct\ set:\ bt)\isanewline
-\ 1.\ \isasymAnd x\ r.\ Br(x,\ Lf,\ r)\ \isasymnoteq \ Lf\isanewline
-\ 2.\ \isasymAnd a\ t1\ t2\ x\ r.\isanewline
-\isaindent{\ 2.\ \ \ \ }\isasymlbrakk a\ \isasymin \ A;\ t1\ \isasymin \ bt(A);\ \isasymAnd x\ r.\ Br(x,\ t1,\ r)\ \isasymnoteq \ t1;\ t2\ \isasymin \ bt(A);\isanewline
-\isaindent{\ 2.\ \ \ \ \ \ \ }\isasymAnd x\ r.\ Br(x,\ t2,\ r)\ \isasymnoteq \ t2\isasymrbrakk \isanewline
-\isaindent{\ 2.\ \ \ \ }\isasymLongrightarrow \ Br(x,\ Br(a,\ t1,\ t2),\ r)\ \isasymnoteq \ Br(a,\ t1,\ t2)
-\end{isabelle}
-Compare the form of the induction hypotheses with the corresponding ones in
-the previous proof.  As before, the proof is concluded by \isa{auto}.
-
-When there are only a few constructors, we might prefer to prove the freeness
-theorems for each constructor.  This is simple:
-\begin{isabelle}
-\isacommand{lemma}\ Br\_iff:\ "Br(a,l,r)\ =\ Br(a',l',r')\ <->\ a=a'\ \&\ l=l'\ \&\ r=r'"\isanewline
-\ \ \isacommand{by}\ (blast\ elim!:\ bt.free\_elims)
-\end{isabelle}
-Here we see a demonstration of freeness reasoning using
-\isa{bt.free\_elims}, but simpler still is just to apply \isa{auto}.
-
-An \ttindex{inductive\_cases} declaration generates instances of the
-case analysis rule that have been simplified  using freeness
-reasoning. 
-\begin{isabelle}
-\isacommand{inductive\_cases}\ Br\_in\_bt:\ "Br(a,\ l,\ r)\ \isasymin \ bt(A)"
-\end{isabelle}
-The theorem just created is 
-\begin{isabelle}
-\isasymlbrakk Br(a,\ l,\ r)\ \isasymin \ bt(A);\ \isasymlbrakk a\ \isasymin \ A;\ l\ \isasymin \ bt(A);\ r\ \isasymin \ bt(A)\isasymrbrakk \ \isasymLongrightarrow \ Q\isasymrbrakk \ \isasymLongrightarrow \ Q.
-\end{isabelle}
-It is an elimination rule that from $\isa{Br}(a,l,r)\in\isa{bt}(A)$
-lets us infer $a\in A$, $l\in\isa{bt}(A)$ and
-$r\in\isa{bt}(A)$.
-
-
-\subsubsection{Mixfix syntax in datatypes}
-
-Mixfix syntax is sometimes convenient.  The theory \isa{Induct/PropLog} makes a
-deep embedding of propositional logic:
-\begin{alltt*}\isastyleminor
-consts     prop :: i
-datatype  "prop" = Fls
-                 | Var ("n \isasymin nat")                ("#_" [100] 100)
-                 | "=>" ("p \isasymin prop", "q \isasymin prop")   (infixr 90)
-\end{alltt*}
-The second constructor has a special $\#n$ syntax, while the third constructor
-is an infixed arrow.
-
-
-\subsubsection{A giant enumeration type}
-
-This example shows a datatype that consists of 60 constructors:
-\begin{alltt*}\isastyleminor
-consts  enum :: i
-datatype
-  "enum" = C00 | C01 | C02 | C03 | C04 | C05 | C06 | C07 | C08 | C09
-         | C10 | C11 | C12 | C13 | C14 | C15 | C16 | C17 | C18 | C19
-         | C20 | C21 | C22 | C23 | C24 | C25 | C26 | C27 | C28 | C29
-         | C30 | C31 | C32 | C33 | C34 | C35 | C36 | C37 | C38 | C39
-         | C40 | C41 | C42 | C43 | C44 | C45 | C46 | C47 | C48 | C49
-         | C50 | C51 | C52 | C53 | C54 | C55 | C56 | C57 | C58 | C59
-end
-\end{alltt*}
-The datatype package scales well.  Even though all properties are proved
-rather than assumed, full processing of this definition takes around two seconds
-(on a 1.8GHz machine).  The constructors have a balanced representation,
-related to binary notation, so freeness properties can be proved fast.
-\begin{isabelle}
-\isacommand{lemma}\ "C00 \isasymnoteq\ C01"\isanewline
-\ \ \isacommand{by}\ simp
-\end{isabelle}
-You need not derive such inequalities explicitly.  The simplifier will
-dispose of them automatically.
-
-\index{*datatype|)}
-
-
-\subsection{Recursive function definitions}\label{sec:ZF:recursive}
-\index{recursive functions|see{recursion}}
-\index{*primrec|(}
-\index{recursion!primitive|(}
-
-Datatypes come with a uniform way of defining functions, {\bf primitive
-  recursion}.  Such definitions rely on the recursion operator defined by the
-datatype package.  Isabelle proves the desired recursion equations as
-theorems.
-
-In principle, one could introduce primitive recursive functions by asserting
-their reduction rules as axioms.  Here is a dangerous way of defining a
-recursive function over binary trees:
-\begin{isabelle}
-\isacommand{consts}\ \ n\_nodes\ ::\ "i\ =>\ i"\isanewline
-\isacommand{axioms}\isanewline
-\ \ n\_nodes\_Lf:\ "n\_nodes(Lf)\ =\ 0"\isanewline
-\ \ n\_nodes\_Br:\ "n\_nodes(Br(a,l,r))\ =\ succ(n\_nodes(l)\ \#+\ n\_nodes(r))"
-\end{isabelle}
-Asserting axioms brings the danger of accidentally introducing
-contradictions.  It should be avoided whenever possible.
-
-The \ttindex{primrec} declaration is a safe means of defining primitive
-recursive functions on datatypes:
-\begin{isabelle}
-\isacommand{consts}\ \ n\_nodes\ ::\ "i\ =>\ i"\isanewline
-\isacommand{primrec}\isanewline
-\ \ "n\_nodes(Lf)\ =\ 0"\isanewline
-\ \ "n\_nodes(Br(a,\ l,\ r))\ =\ succ(n\_nodes(l)\ \#+\ n\_nodes(r))"
-\end{isabelle}
-Isabelle will now derive the two equations from a low-level definition  
-based upon well-founded recursion.  If they do not define a legitimate
-recursion, then Isabelle will reject the declaration.
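-Because the derived equations are installed as simplification rules (see
-below), simple instances can be checked at once.  This sketch also assumes
-that arithmetic simplification reduces \isa{0\ \#+\ 0} to \isa{0}:
-\begin{isabelle}
-\isacommand{lemma}\ "n\_nodes(Br(a,\ Lf,\ Lf))\ =\ succ(0)"\isanewline
-\ \ \isacommand{by}\ simp
-\end{isabelle}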
-
-
-\subsubsection{Syntax of recursive definitions}
-
-The general form of a primitive recursive definition is
-\begin{ttbox}\isastyleminor
-primrec
-    {\it reduction rules}
-\end{ttbox}
-where \textit{reduction rules} specify one or more equations of the form
-\[ f \, x@1 \, \dots \, x@m \, (C \, y@1 \, \dots \, y@k) \, z@1 \,
-\dots \, z@n = r \] such that $C$ is a constructor of the datatype, $r$
-contains only the free variables on the left-hand side, and all recursive
-calls in $r$ are of the form $f \, \dots \, y@i \, \dots$ for some $i$.  
-There must be at most one reduction rule for each constructor.  The order is
-immaterial.  For missing constructors, the function is defined to return zero.
-
-All reduction rules are added to the default simpset.
-If you would like to refer to some rule by name, then you must prefix
-the rule with an identifier.  These identifiers, like those in the
-\isa{rules} section of a theory, will be visible in proof scripts.
-
-The reduction rules become part of the default simpset, which
-leads to short proof scripts:
-\begin{isabelle}
-\isacommand{lemma}\ n\_nodes\_type\ [simp]:\ "t\ \isasymin \ bt(A)\ ==>\ n\_nodes(t)\ \isasymin \ nat"\isanewline
-\ \ \isacommand{by}\ (induct\_tac\ t,\ auto)
-\end{isabelle}
-
-You can even use the \isa{primrec} form with non-recursive datatypes and
-with codatatypes.  Recursion is not allowed, but it provides a convenient
-syntax for defining functions by cases.
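-For example, a discriminator on binary trees needs no recursive calls; the
-name \isa{is\_leaf} below is ours and not part of the library:
-\begin{isabelle}
-\isacommand{consts}\ \ is\_leaf\ ::\ "i\ =>\ i"\isanewline
-\isacommand{primrec}\isanewline
-\ \ "is\_leaf(Lf)\ =\ 1"\isanewline
-\ \ "is\_leaf(Br(a,\ l,\ r))\ =\ 0"
-\end{isabelle}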
-
-
-\subsubsection{Example: varying arguments}
-
-All arguments, other than the recursive one, must be the same in each equation
-and in each recursive call.  To get around this restriction, use explicit
-$\lambda$-abstraction and function application.  For example, let us
-define the tail-recursive version of \isa{n\_nodes}, using an 
-accumulating argument for the counter.  The second argument, $k$, varies in
-recursive calls.
-\begin{isabelle}
-\isacommand{consts}\ \ n\_nodes\_aux\ ::\ "i\ =>\ i"\isanewline
-\isacommand{primrec}\isanewline
-\ \ "n\_nodes\_aux(Lf)\ =\ (\isasymlambda k\ \isasymin \ nat.\ k)"\isanewline
-\ \ "n\_nodes\_aux(Br(a,l,r))\ =\ \isanewline
-\ \ \ \ \ \ (\isasymlambda k\ \isasymin \ nat.\ n\_nodes\_aux(r)\ `\ \ (n\_nodes\_aux(l)\ `\ succ(k)))"
-\end{isabelle}
-Now \isa{n\_nodes\_aux(t)\ `\ k} is our function in two arguments. We
-can prove a theorem relating it to \isa{n\_nodes}. Note the quantification
-over \isa{k\ \isasymin \ nat}:
-\begin{isabelle}
-\isacommand{lemma}\ n\_nodes\_aux\_eq\ [rule\_format]:\isanewline
-\ \ \ \ \ "t\ \isasymin \ bt(A)\ ==>\ \isasymforall k\ \isasymin \ nat.\ n\_nodes\_aux(t)`k\ =\ n\_nodes(t)\ \#+\ k"\isanewline
-\ \ \isacommand{by}\ (induct\_tac\ t,\ simp\_all)
-\end{isabelle}
-
-Now, we can use \isa{n\_nodes\_aux} to define a tail-recursive version
-of \isa{n\_nodes}:
-\begin{isabelle}
-\isacommand{constdefs}\isanewline
-\ \ n\_nodes\_tail\ ::\ "i\ =>\ i"\isanewline
-\ \ \ "n\_nodes\_tail(t)\ ==\ n\_nodes\_aux(t)\ `\ 0"
-\end{isabelle}
-It is easy to
-prove that \isa{n\_nodes\_tail} is equivalent to \isa{n\_nodes}:
-\begin{isabelle}
-\isacommand{lemma}\ "t\ \isasymin \ bt(A)\ ==>\ n\_nodes\_tail(t)\ =\ n\_nodes(t)"\isanewline
-\ \isacommand{by}\ (simp\ add:\ n\_nodes\_tail\_def\ n\_nodes\_aux\_eq)
-\end{isabelle}
-
-
-
-
-\index{recursion!primitive|)}
-\index{*primrec|)}
-
-
-\section{Inductive and coinductive definitions}
-\index{*inductive|(}
-\index{*coinductive|(}
-
-An {\bf inductive definition} specifies the least set~$R$ closed under given
-rules.  (Applying a rule to elements of~$R$ yields a result within~$R$.)  For
-example, a structural operational semantics is an inductive definition of an
-evaluation relation.  Dually, a {\bf coinductive definition} specifies the
-greatest set~$R$ consistent with given rules.  (Every element of~$R$ can be
-seen as arising by applying a rule to elements of~$R$.)  An important example
-is using bisimulation relations to formalise equivalence of processes and
-infinite data structures.
-
-A theory file may contain any number of inductive and coinductive
-definitions.  They may be intermixed with other declarations; in
-particular, the (co)inductive sets {\bf must} be declared separately as
-constants, and may have mixfix syntax or be subject to syntax translations.
-
-Each (co)inductive definition adds definitions to the theory and also
-proves some theorems.  A coinductive definition behaves identically to the
-analogous inductive definition, except that it yields a coinduction rule
-instead of an induction rule.  Its treatment of coinduction is described in
-detail in a separate paper,%
-\footnote{It appeared in CADE~\cite{paulson-CADE}; a longer version is
-  distributed with Isabelle as \emph{A Fixedpoint Approach to 
- (Co)Inductive and (Co)Datatype Definitions}.}  %
-which you might refer to for background information.
-
-
-\subsection{The syntax of a (co)inductive definition}
-An inductive definition has the form
-\begin{ttbox}\isastyleminor
-inductive
-  domains     {\it domain declarations}
-  intros      {\it introduction rules}
-  monos       {\it monotonicity theorems}
-  con_defs    {\it constructor definitions}
-  type_intros {\it introduction rules for type-checking}
-  type_elims  {\it elimination rules for type-checking}
-\end{ttbox}
-A coinductive definition is identical, but starts with the keyword
-\isa{co\-inductive}.  
-
-The \isa{monos}, \isa{con\_defs}, \isa{type\_intros} and \isa{type\_elims}
-sections are optional.  If present, each is specified as a list of
-theorems, which may contain Isar attributes as usual.
-
-\begin{description}
-\item[\it domain declarations] are items of the form
-  {\it string\/}~\isa{\isasymsubseteq }~{\it string}, associating each recursive set with
-  its domain.  (The domain is some existing set that is large enough to
-  hold the new set being defined.)
-
-\item[\it introduction rules] specify one or more introduction rules in
-  the form {\it ident\/}~{\it string}, where the identifier gives the name of
-  the rule in the result structure.
-
-\item[\it monotonicity theorems] are required for each operator applied to
-  a recursive set in the introduction rules.  There \textbf{must} be a theorem
-  of the form $A\subseteq B\Imp M(A)\subseteq M(B)$, for each premise $t\in M(R_i)$
-  in an introduction rule!
-
-\item[\it constructor definitions] contain definitions of constants
-  appearing in the introduction rules.  The (co)datatype package supplies
-  the constructors' definitions here.  Most (co)inductive definitions omit
-  this section; one exception is the primitive recursive functions example;
-  see theory \isa{Induct/Primrec}.
-  
-\item[\it type\_intros] consists of introduction rules for type-checking the
-  definition: for demonstrating that the new set is included in its domain.
-  (The proof uses depth-first search.)
-
-\item[\it type\_elims] consists of elimination rules for type-checking the
-  definition.  They are presumed to be safe and are applied as often as
-  possible prior to the \isa{type\_intros} search.
-\end{description}
-
-The package has a few restrictions:
-\begin{itemize}
-\item The theory must separately declare the recursive sets as
-  constants.
-
-\item The names of the recursive sets must be identifiers, not infix
-operators.  
-
-\item Side-conditions must not be conjunctions.  However, an introduction rule
-may contain any number of side-conditions.
-
-\item Side-conditions of the form $x=t$, where the variable~$x$ does not
-  occur in~$t$, will be substituted through the rule \isa{mutual\_induct}.
-\end{itemize}
-
-
-\subsection{Example of an inductive definition}
-
-Below, we shall see how Isabelle/ZF defines the finite powerset
-operator.  The first step is to declare the constant~\isa{Fin}.  Then we
-must declare it inductively, with two introduction rules:
-\begin{isabelle}
-\isacommand{consts}\ \ Fin\ ::\ "i=>i"\isanewline
-\isacommand{inductive}\isanewline
-\ \ \isakeyword{domains}\ \ \ "Fin(A)"\ \isasymsubseteq\ "Pow(A)"\isanewline
-\ \ \isakeyword{intros}\isanewline
-\ \ \ \ emptyI:\ \ "0\ \isasymin\ Fin(A)"\isanewline
-\ \ \ \ consI:\ \ \ "[|\ a\ \isasymin\ A;\ \ b\ \isasymin\ Fin(A)\ |]\ ==>\ cons(a,b)\ \isasymin\ Fin(A)"\isanewline
-\ \ \isakeyword{type\_intros}\ \ empty\_subsetI\ cons\_subsetI\ PowI\isanewline
-\ \ \isakeyword{type\_elims}\ \ \ PowD\ [THEN\ revcut\_rl]\end{isabelle}
-The resulting theory contains a name space, called~\isa{Fin}.
-The $\isa{Fin}(A)$ introduction rules can be referred to collectively as
-\isa{Fin.intros}, and also individually as \isa{Fin.emptyI} and
-\isa{Fin.consI}.  The induction rule is \isa{Fin.induct}.
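-As a quick check, simple membership facts follow from these rules alone;
-the sketch below uses only \isa{Fin.intros}:
-\begin{isabelle}
-\isacommand{lemma}\ "a\ \isasymin \ A\ ==>\ cons(a,\ 0)\ \isasymin \ Fin(A)"\isanewline
-\ \ \isacommand{by}\ (blast\ intro:\ Fin.intros)
-\end{isabelle}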
-
-The chief problem with making (co)inductive definitions involves type-checking
-the rules.  Sometimes, additional theorems need to be supplied under
-\isa{type_intros} or \isa{type_elims}.  If the package fails when trying
-to prove your introduction rules, then set the flag \ttindexbold{trace_induct}
-to \isa{true} and try again.  (See the manual \emph{A Fixedpoint Approach
-  \ldots} for more discussion of type-checking.)
-
-In the example above, $\isa{Pow}(A)$ is given as the domain of
-$\isa{Fin}(A)$, for obviously every finite subset of~$A$ is a subset
-of~$A$.  However, the inductive definition package can prove this only when
-given a few hints.
-Here is the output that results (with the flag set) when the
-\isa{type_intros} and \isa{type_elims} are omitted from the inductive
-definition above:
-\begin{alltt*}\isastyleminor
-Inductive definition Finite.Fin
-Fin(A) ==
-lfp(Pow(A),
-    \%X. {z\isasymin{}Pow(A) . z = 0 | ({\isasymexists}a b. z = cons(a,b) & a\isasymin{}A & b\isasymin{}X)})
-  Proving monotonicity...
-\ttbreak
-  Proving the introduction rules...
-The type-checking subgoal:
-0 \isasymin Fin(A)
- 1. 0 \isasymin Pow(A)
-\ttbreak
-The subgoal after monos, type_elims:
-0 \isasymin Fin(A)
- 1. 0 \isasymin Pow(A)
-*** prove_goal: tactic failed
-\end{alltt*}
-We see the need to supply theorems to let the package prove
-$\emptyset\in\isa{Pow}(A)$.  Restoring the \isa{type_intros} but not the
-\isa{type_elims}, we again get an error message:
-\begin{alltt*}\isastyleminor
-The type-checking subgoal:
-0 \isasymin Fin(A)
- 1. 0 \isasymin Pow(A)
-\ttbreak
-The subgoal after monos, type_elims:
-0 \isasymin Fin(A)
- 1. 0 \isasymin Pow(A)
-\ttbreak
-The type-checking subgoal:
-cons(a, b) \isasymin Fin(A)
- 1. [| a \isasymin A; b \isasymin Fin(A) |] ==> cons(a, b) \isasymin Pow(A)
-\ttbreak
-The subgoal after monos, type_elims:
-cons(a, b) \isasymin Fin(A)
- 1. [| a \isasymin A; b \isasymin Pow(A) |] ==> cons(a, b) \isasymin Pow(A)
-*** prove_goal: tactic failed
-\end{alltt*}
-The first rule has been type-checked, but the second one has failed.  The
-simplest solution to such problems is to prove the failed subgoal separately
-and to supply it under \isa{type_intros}.  The solution actually used is
-to supply, under \isa{type_elims}, a rule that changes
-$b\in\isa{Pow}(A)$ to $b\subseteq A$; together with \isa{cons_subsetI}
-and \isa{PowI}, it is enough to complete the type-checking.
-
-
-
-\subsection{Further examples}
-
-An inductive definition may involve arbitrary monotonic operators.  Here is a
-standard example: the accessible part of a relation.  Note the use
-of~\isa{Pow} in the introduction rule and the corresponding mention of the
-rule \isa{Pow\_mono} in the \isa{monos} list.  If the desired rule has a
-universally quantified premise, usually the effect can be obtained using
-\isa{Pow}.
-\begin{isabelle}
-\isacommand{consts}\ \ acc\ ::\ "i\ =>\ i"\isanewline
-\isacommand{inductive}\isanewline
-\ \ \isakeyword{domains}\ "acc(r)"\ \isasymsubseteq \ "field(r)"\isanewline
-\ \ \isakeyword{intros}\isanewline
-\ \ \ \ vimage:\ \ "[|\ r-``\isacharbraceleft a\isacharbraceright\ \isasymin\ Pow(acc(r));\ a\ \isasymin \ field(r)\ |]
-\isanewline
-\ \ \ \ \ \ \ \ \ \ \ \ \ \ ==>\ a\ \isasymin \ acc(r)"\isanewline
-\ \ \isakeyword{monos}\ \ Pow\_mono
-\end{isabelle}
-
-Finally, here are some coinductive definitions.  We begin by defining
-lazy (potentially infinite) lists as a codatatype:
-\begin{isabelle}
-\isacommand{consts}\ \ llist\ \ ::\ "i=>i"\isanewline
-\isacommand{codatatype}\isanewline
-\ \ "llist(A)"\ =\ LNil\ |\ LCons\ ("a\ \isasymin \ A",\ "l\ \isasymin \ llist(A)")\isanewline
-\end{isabelle}
-
-The notion of equality on such lists is modelled as a bisimulation:
-\begin{isabelle}
-\isacommand{consts}\ \ lleq\ ::\ "i=>i"\isanewline
-\isacommand{coinductive}\isanewline
-\ \ \isakeyword{domains}\ "lleq(A)"\ <=\ "llist(A)\ *\ llist(A)"\isanewline
-\ \ \isakeyword{intros}\isanewline
-\ \ \ \ LNil:\ \ "<LNil,\ LNil>\ \isasymin \ lleq(A)"\isanewline
-\ \ \ \ LCons:\ "[|\ a\ \isasymin \ A;\ <l,l'>\ \isasymin \ lleq(A)\ |]\ \isanewline
-\ \ \ \ \ \ \ \ \ \ \ \ ==>\ <LCons(a,l),\ LCons(a,l')>\ \isasymin \ lleq(A)"\isanewline
-\ \ \isakeyword{type\_intros}\ \ llist.intros
-\end{isabelle}
-This use of \isa{type_intros} is typical: the relation concerns the
-codatatype \isa{llist}, so naturally the introduction rules for that
-codatatype will be required for type-checking the rules.
-
-The Isabelle distribution contains many other inductive definitions.  Simple
-examples are collected in subdirectory \isa{ZF/Induct}.  The directory
-\isa{Coind} and the theory \isa{ZF/Induct/LList} contain coinductive
-definitions.  Larger examples may be found in other subdirectories of
-\isa{ZF}, such as \isa{IMP} and \isa{Resid}.
-
-
-\subsection{Theorems generated}
-
-Each (co)inductive set defined in a theory file generates a name space
-containing the following elements:
-\begin{ttbox}\isastyleminor
-intros        \textrm{the introduction rules}
-elim          \textrm{the elimination (case analysis) rule}
-induct        \textrm{the standard induction rule}
-mutual_induct \textrm{the mutual induction rule, if needed}
-defs          \textrm{definitions of inductive sets}
-bnd_mono      \textrm{monotonicity property}
-dom_subset    \textrm{inclusion in `bounding set'}
-\end{ttbox}
-Furthermore, each introduction rule is available under its declared
-name. For a codatatype, the component \isa{coinduct} is the coinduction rule,
-replacing the \isa{induct} component.
-
-Recall that the \ttindex{inductive\_cases} declaration generates
-simplified instances of the case analysis rule.  It is as useful for
-inductive definitions as it is for datatypes.  There are many examples
-in the theory
-\isa{Induct/Comb}, which is discussed at length
-elsewhere~\cite{paulson-generic}.  The theory first defines the
-datatype
-\isa{comb} of combinators:
-\begin{alltt*}\isastyleminor
-consts comb :: i
-datatype  "comb" = K
-                 | S
-                 | "#" ("p \isasymin comb", "q \isasymin comb")   (infixl 90)
-\end{alltt*}
-The theory goes on to define contraction and parallel contraction
-inductively.  Then the theory \isa{Induct/Comb.thy} defines special
-cases of contraction, such as this one:
-\begin{isabelle}
-\isacommand{inductive\_cases}\ K\_contractE [elim!]:\ "K -1-> r"
-\end{isabelle}
-The theorem just created is \isa{K -1-> r \ \isasymLongrightarrow \ Q},
-which expresses that the combinator \isa{K} cannot reduce to
-anything.  (From the assumption \isa{K-1->r}, we can conclude any desired
-formula \isa{Q}\@.)  Similar elimination rules for \isa{S} and application are also
-generated. The attribute \isa{elim!}\ shown above supplies the generated
-theorem to the classical reasoner.  This mode of working allows
-effective reasoning about operational semantics.
-
-\index{*coinductive|)} \index{*inductive|)}
-
-
-
-\section{The outer reaches of set theory}
-
-The constructions of the natural numbers and lists use a suite of
-operators for handling recursive function definitions.  I have described
-the developments in detail elsewhere~\cite{paulson-set-II}.  Here is a brief
-summary:
-\begin{itemize}
-  \item Theory \isa{Trancl} defines the transitive closure of a relation
-    (as a least fixedpoint).
-
-  \item Theory \isa{WF} proves the well-founded recursion theorem, using an
-    elegant approach of Tobias Nipkow.  This theorem permits general
-    recursive definitions within set theory.
-
-  \item Theory \isa{Ord} defines the notions of transitive set and ordinal
-    number.  It derives transfinite induction.  A key definition is {\bf
-      less than}: $i<j$ if and only if $i$ and $j$ are both ordinals and
-    $i\in j$.  As a special case, it includes less than on the natural
-    numbers.
-    
-  \item Theory \isa{Epsilon} derives $\varepsilon$-induction and
-    $\varepsilon$-recursion, which are generalisations of transfinite
-    induction and recursion.  It also defines \cdx{rank}$(x)$, which is the
-    least ordinal $\alpha$ such that $x$ is constructed at stage $\alpha$ of
-    the cumulative hierarchy (thus $x\in V@{\alpha+1}$).
-\end{itemize}
-
-Other important theories lead to a theory of cardinal numbers.  They have
-not yet been written up anywhere.  Here is a summary:
-\begin{itemize}
-\item Theory \isa{Rel} defines the basic properties of relations, such as
-  reflexivity, symmetry and transitivity.
-
-\item Theory \isa{EquivClass} develops a theory of equivalence
-  classes, not using the Axiom of Choice.
-
-\item Theory \isa{Order} defines partial orderings, total orderings and
-  wellorderings.
-
-\item Theory \isa{OrderArith} defines orderings on sum and product sets.
-  These can be used to define ordinal arithmetic and have applications to
-  cardinal arithmetic.
-
-\item Theory \isa{OrderType} defines order types.  Every wellordering is
-  equivalent to a unique ordinal, which is its order type.
-
-\item Theory \isa{Cardinal} defines equipollence and cardinal numbers.
- 
-\item Theory \isa{CardinalArith} defines cardinal addition and
-  multiplication, and proves their elementary laws.  It proves that there
-  is no greatest cardinal.  It also proves a deep result, namely
-  $\kappa\otimes\kappa=\kappa$ for every infinite cardinal~$\kappa$; see
-  Kunen~\cite[page 29]{kunen80}.  None of these results assume the Axiom of
-  Choice, which complicates their proofs considerably.  
-\end{itemize}
-
-The following developments involve the Axiom of Choice (AC):
-\begin{itemize}
-\item Theory \isa{AC} asserts the Axiom of Choice and proves some simple
-  equivalent forms.
-
-\item Theory \isa{Zorn} proves Hausdorff's Maximal Principle, Zorn's Lemma
-  and the Wellordering Theorem, following Abrial and
-  Laffitte~\cite{abrial93}.
-
-\item Theory \isa{Cardinal\_AC} uses AC to prove simplified theorems about
-  the cardinals.  It also proves a theorem needed to justify
-  infinitely branching datatype declarations: if $\kappa$ is an infinite
-  cardinal and $|X(\alpha)| \le \kappa$ for all $\alpha<\kappa$ then
-  $|\union\sb{\alpha<\kappa} X(\alpha)| \le \kappa$.
-
-\item Theory \isa{InfDatatype} proves theorems to justify infinitely
-  branching datatypes.  Arbitrary index sets are allowed, provided their
-  cardinalities have an upper bound.  The theory also justifies some
-  unusual cases of finite branching, involving the finite powerset operator
-  and the finite function space operator.
-\end{itemize}
-
-
-
-\section{The examples directories}
-Directory \isa{ZF/IMP} contains a mechanised version of a semantic
-equivalence proof taken from Winskel~\cite{winskel93}.  It formalises the
-denotational and operational semantics of a simple while-language, then
-proves the two equivalent.  It contains several datatype and inductive
-definitions, and demonstrates their use.
-
-The directory \isa{ZF/ex} contains further developments in ZF set theory.
-Here is an overview; see the files themselves for more details.  I describe
-much of this material in other
-publications~\cite{paulson-set-I,paulson-set-II,paulson-fixedpt-milner}.
-\begin{itemize}
-\item File \isa{misc.ML} contains miscellaneous examples such as
-  Cantor's Theorem, the Schr\"oder-Bernstein Theorem and the `Composition
-  of homomorphisms' challenge~\cite{boyer86}.
-
-\item Theory \isa{Ramsey} proves the finite exponent 2 version of
-  Ramsey's Theorem, following Basin and Kaufmann's
-  presentation~\cite{basin91}.
-
-\item Theory \isa{Integ} develops a theory of the integers as
-  equivalence classes of pairs of natural numbers.
-
-\item Theory \isa{Primrec} develops some computation theory.  It
-  inductively defines the set of primitive recursive functions and presents a
-  proof that Ackermann's function is not primitive recursive.
-
-\item Theory \isa{Primes} defines the Greatest Common Divisor of two
-  natural numbers and the ``divides'' relation.
-
-\item Theory \isa{Bin} defines a datatype for two's complement binary
-  integers, then proves rewrite rules to perform binary arithmetic.  For
-  instance, $1359\times {-}2468 = {-}3354012$ takes 0.3 seconds.
-
-\item Theory \isa{BT} defines the recursive data structure $\isa{bt}(A)$, labelled binary trees.
-
-\item Theory \isa{Term} defines a recursive data structure for terms
-  and term lists.  These are simply finite branching trees.
-
-\item Theory \isa{TF} defines primitives for solving mutually
-  recursive equations over sets.  It constructs sets of trees and forests
-  as an example, including induction and recursion rules that handle the
-  mutual recursion.
-
-\item Theory \isa{Prop} proves soundness and completeness of
-  propositional logic~\cite{paulson-set-II}.  This illustrates datatype
-  definitions, inductive definitions, structural induction and rule
-  induction.
-
-\item Theory \isa{ListN} inductively defines the lists of $n$
-  elements~\cite{paulin-tlca}.
-
-\item Theory \isa{Acc} inductively defines the accessible part of a
-  relation~\cite{paulin-tlca}.
-
-\item Theory \isa{Comb} defines the datatype of combinators and
-  inductively defines contraction and parallel contraction.  It goes on to
-  prove the Church-Rosser Theorem.  This case study follows Camilleri and
-  Melham~\cite{camilleri92}.
-
-\item Theory \isa{LList} defines lazy lists and a coinduction
-  principle for proving equations between them.
-\end{itemize}
-
-
-\section{A proof about powersets}\label{sec:ZF-pow-example}
-To demonstrate high-level reasoning about subsets, let us prove the
-equation ${\isa{Pow}(A)\cap \isa{Pow}(B)}= \isa{Pow}(A\cap B)$.  Compared
-with first-order logic, set theory involves a maze of rules, and theorems
-have many different proofs.  Attempting other proofs of the theorem might
-be instructive.  This proof exploits the lattice properties of
-intersection.  It also uses the monotonicity of the powerset operation,
-from \isa{ZF/mono.ML}:
-\begin{isabelle}
-\tdx{Pow_mono}:     A \isasymsubseteq B ==> Pow(A) \isasymsubseteq Pow(B)
-\end{isabelle}
-We enter the goal and make the first step, which breaks the equation into
-two inclusions by extensionality:\index{*equalityI theorem}
-\begin{isabelle}
-\isacommand{lemma}\ "Pow(A\ Int\ B)\ =\ Pow(A)\ Int\ Pow(B)"\isanewline
-\ 1.\ Pow(A\ \isasyminter \ B)\ =\ Pow(A)\ \isasyminter \ Pow(B)\isanewline
-\isacommand{apply}\ (rule\ equalityI)\isanewline
-\ 1.\ Pow(A\ \isasyminter \ B)\ \isasymsubseteq \ Pow(A)\ \isasyminter \ Pow(B)\isanewline
-\ 2.\ Pow(A)\ \isasyminter \ Pow(B)\ \isasymsubseteq \ Pow(A\ \isasyminter \ B)
-\end{isabelle}
-Both inclusions could be tackled straightforwardly using \isa{subsetI}.
-A shorter proof results from noting that intersection forms the greatest
-lower bound:\index{*Int_greatest theorem}
-\begin{isabelle}
-\isacommand{apply}\ (rule\ Int\_greatest)\isanewline
-\ 1.\ Pow(A\ \isasyminter \ B)\ \isasymsubseteq \ Pow(A)\isanewline
-\ 2.\ Pow(A\ \isasyminter \ B)\ \isasymsubseteq \ Pow(B)\isanewline
-\ 3.\ Pow(A)\ \isasyminter \ Pow(B)\ \isasymsubseteq \ Pow(A\ \isasyminter \ B)
-\end{isabelle}
-Subgoal~1 follows by applying the monotonicity of \isa{Pow} to $A\int
-B\subseteq A$; subgoal~2 follows similarly:
-\index{*Int_lower1 theorem}\index{*Int_lower2 theorem}
-\begin{isabelle}
-\isacommand{apply}\ (rule\ Int\_lower1\ [THEN\ Pow\_mono])\isanewline
-\ 1.\ Pow(A\ \isasyminter \ B)\ \isasymsubseteq \ Pow(B)\isanewline
-\ 2.\ Pow(A)\ \isasyminter \ Pow(B)\ \isasymsubseteq \ Pow(A\ \isasyminter \ B)
-\isanewline
-\isacommand{apply}\ (rule\ Int\_lower2\ [THEN\ Pow\_mono])\isanewline
-\ 1.\ Pow(A)\ \isasyminter \ Pow(B)\ \isasymsubseteq \ Pow(A\ \isasyminter \ B)
-\end{isabelle}
-We are left with the opposite inclusion, which we tackle in the
-straightforward way:\index{*subsetI theorem}
-\begin{isabelle}
-\isacommand{apply}\ (rule\ subsetI)\isanewline
-\ 1.\ \isasymAnd x.\ x\ \isasymin \ Pow(A)\ \isasyminter \ Pow(B)\ \isasymLongrightarrow \ x\ \isasymin \ Pow(A\ \isasyminter \ B)
-\end{isabelle}
-The subgoal is to show $x\in \isa{Pow}(A\cap B)$ assuming $x\in\isa{Pow}(A)\cap \isa{Pow}(B)$; eliminating this assumption produces two
-subgoals.  The rule \tdx{IntE} treats the intersection like a conjunction
-instead of unfolding its definition.
-\begin{isabelle}
-\isacommand{apply}\ (erule\ IntE)\isanewline
-\ 1.\ \isasymAnd x.\ \isasymlbrakk x\ \isasymin \ Pow(A);\ x\ \isasymin \ Pow(B)\isasymrbrakk \ \isasymLongrightarrow \ x\ \isasymin \ Pow(A\ \isasyminter \ B)
-\end{isabelle}
-The next step replaces the \isa{Pow} by the subset
-relation~($\subseteq$).\index{*PowI theorem}
-\begin{isabelle}
-\isacommand{apply}\ (rule\ PowI)\isanewline
-\ 1.\ \isasymAnd x.\ \isasymlbrakk x\ \isasymin \ Pow(A);\ x\ \isasymin \ Pow(B)\isasymrbrakk \ \isasymLongrightarrow \ x\ \isasymsubseteq \ A\ \isasyminter \ B%
-\end{isabelle}
-We perform the same replacement in the assumptions.  This is a good
-demonstration of the tactic \ttindex{drule}:\index{*PowD theorem}
-\begin{isabelle}
-\isacommand{apply}\ (drule\ PowD)+\isanewline
-\ 1.\ \isasymAnd x.\ \isasymlbrakk x\ \isasymsubseteq \ A;\ x\ \isasymsubseteq \ B\isasymrbrakk \ \isasymLongrightarrow \ x\ \isasymsubseteq \ A\ \isasyminter \ B%
-\end{isabelle}
-The assumptions are that $x$ is a lower bound of both $A$ and~$B$, but
-$A\int B$ is the greatest lower bound:\index{*Int_greatest theorem}
-\begin{isabelle}
-\isacommand{apply}\ (rule\ Int\_greatest)\isanewline
-\ 1.\ \isasymAnd x.\ \isasymlbrakk x\ \isasymsubseteq \ A;\ x\ \isasymsubseteq \ B\isasymrbrakk \ \isasymLongrightarrow \ x\ \isasymsubseteq \ A\isanewline
-\ 2.\ \isasymAnd x.\ \isasymlbrakk x\ \isasymsubseteq \ A;\ x\ \isasymsubseteq \ B\isasymrbrakk \ \isasymLongrightarrow \ x\ \isasymsubseteq \ B%
-\end{isabelle}
-To conclude the proof, we clear up the trivial subgoals:
-\begin{isabelle}
-\isacommand{apply}\ (assumption+)\isanewline
-\isacommand{done}%
-\end{isabelle}
-
-We could have performed this proof instantly by calling
-\ttindex{blast}:
-\begin{isabelle}
-\isacommand{lemma}\ "Pow(A\ Int\ B)\ =\ Pow(A)\ Int\ Pow(B)"\isanewline
-\isacommand{by}\ blast
-\end{isabelle}
-Past researchers regarded this as a difficult proof, as indeed it is if all
-the symbols are replaced by their definitions.
-\goodbreak
-
-\section{Monotonicity of the union operator}
-For another example, we prove that general union is monotonic:
-${C\subseteq D}$ implies $\bigcup(C)\subseteq \bigcup(D)$.  To begin, we
-tackle the inclusion using \tdx{subsetI}:
-\begin{isabelle}
-\isacommand{lemma}\ "C\isasymsubseteq D\ ==>\ Union(C)\
-\isasymsubseteq \ Union(D)"\isanewline
-\isacommand{apply}\ (rule\ subsetI)\isanewline
-\ 1.\ \isasymAnd x.\ \isasymlbrakk C\ \isasymsubseteq \ D;\ x\ \isasymin \ \isasymUnion C\isasymrbrakk \ \isasymLongrightarrow \ x\ \isasymin \ \isasymUnion D%
-\end{isabelle}
-Big union is like an existential quantifier --- the occurrence in the
-assumptions must be eliminated early, since it creates parameters.
-\index{*UnionE theorem}
-\begin{isabelle}
-\isacommand{apply}\ (erule\ UnionE)\isanewline
-\ 1.\ \isasymAnd x\ B.\ \isasymlbrakk C\ \isasymsubseteq \ D;\ x\ \isasymin \ B;\ B\ \isasymin \ C\isasymrbrakk \ \isasymLongrightarrow \ x\ \isasymin \ \isasymUnion D%
-\end{isabelle}
-Now we may apply \tdx{UnionI}, which creates an unknown involving the
-parameters.  To show \isa{x\ \isasymin \ \isasymUnion D} it suffices to show that~\isa{x} belongs
-to some element, say~\isa{?B2(x,B)}, of~\isa{D}\@.
-\begin{isabelle}
-\isacommand{apply}\ (rule\ UnionI)\isanewline
-\ 1.\ \isasymAnd x\ B.\ \isasymlbrakk C\ \isasymsubseteq \ D;\ x\ \isasymin \ B;\ B\ \isasymin \ C\isasymrbrakk \ \isasymLongrightarrow \ ?B2(x,\ B)\ \isasymin \ D\isanewline
-\ 2.\ \isasymAnd x\ B.\ \isasymlbrakk C\ \isasymsubseteq \ D;\ x\ \isasymin \ B;\ B\ \isasymin \ C\isasymrbrakk \ \isasymLongrightarrow \ x\ \isasymin \ ?B2(x,\ B)
-\end{isabelle}
-Combining the rule \tdx{subsetD} with the assumption \isa{C\ \isasymsubseteq \ D} yields 
-$\Var{a}\in C \Imp \Var{a}\in D$, which reduces subgoal~1.  Note that
-\isa{erule} removes the subset assumption.
-\begin{isabelle}
-\isacommand{apply}\ (erule\ subsetD)\isanewline
-\ 1.\ \isasymAnd x\ B.\ \isasymlbrakk x\ \isasymin \ B;\ B\ \isasymin \ C\isasymrbrakk \ \isasymLongrightarrow \ ?B2(x,\ B)\ \isasymin \ C\isanewline
-\ 2.\ \isasymAnd x\ B.\ \isasymlbrakk C\ \isasymsubseteq \ D;\ x\ \isasymin \ B;\ B\ \isasymin \ C\isasymrbrakk \ \isasymLongrightarrow \ x\ \isasymin \ ?B2(x,\ B)
-\end{isabelle}
-The rest is routine.  Observe how the first call to \isa{assumption}
-instantiates \isa{?B2(x,B)} to~\isa{B}\@.
-\begin{isabelle}
-\isacommand{apply}\ assumption\ \isanewline
-\ 1.\ \isasymAnd x\ B.\ \isasymlbrakk C\ \isasymsubseteq \ D;\ x\ \isasymin \ B;\ B\ \isasymin \ C\isasymrbrakk \ \isasymLongrightarrow \ x\ \isasymin \ B%
-\isanewline
-\isacommand{apply}\ assumption\ \isanewline
-No\ subgoals!\isanewline
-\isacommand{done}%
-\end{isabelle}
-Again, \isa{blast} can prove this theorem in one step.
-
-The theory \isa{ZF/equalities.thy} has many similar proofs.  Reasoning about
-general intersection can be difficult because of its anomalous behaviour on
-the empty set.  However, \isa{blast} copes well with these.  Here is
-a typical example, borrowed from Devlin~\cite[page 12]{devlin79}:
-\[ a\in C \,\Imp\, \inter@{x\in C} \Bigl(A(x) \int B(x)\Bigr) =        
-       \Bigl(\inter@{x\in C} A(x)\Bigr)  \int  
-       \Bigl(\inter@{x\in C} B(x)\Bigr)  \]
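-One possible transcription into Isabelle notation, again left to
-\isa{blast}, is sketched below; it assumes the usual binder syntax for
-indexed intersections:
-\begin{isabelle}
-\isacommand{lemma}\ "a\ \isasymin \ C\ ==>\isanewline
-\ \ \ \ \ \ \ \ ({\isasymInter}x\ \isasymin \ C.\ A(x)\ Int\ B(x))\ =\ ({\isasymInter}x\ \isasymin \ C.\ A(x))\ Int\ ({\isasymInter}x\ \isasymin \ C.\ B(x))"\isanewline
-\ \ \isacommand{by}\ blast
-\end{isabelle}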
-
-\section{Low-level reasoning about functions}
-The derived rules \isa{lamI}, \isa{lamE}, \isa{lam_type}, \isa{beta}
-and \isa{eta} support reasoning about functions in a
-$\lambda$-calculus style.  This is generally easier than regarding
-functions as sets of ordered pairs.  But sometimes we must look at the
-underlying representation, as in the following proof
-of~\tdx{fun_disjoint_apply1}.  This states that if $f$ and~$g$ are
-functions with disjoint domains~$A$ and~$C$, and if $a\in A$, then
-$(f\un g)`a = f`a$:
-\begin{isabelle}
-\isacommand{lemma}\ "[|\ a\ \isasymin \ A;\ \ f\ \isasymin \ A->B;\ \ g\ \isasymin \ C->D;\ \ A\ \isasyminter \ C\ =\ 0\ |]
-\isanewline
-\ \ \ \ \ \ \ \ ==>\ (f\ \isasymunion \ g)`a\ =\ f`a"
-\end{isabelle}
-Using \tdx{apply_equality}, we reduce the equality to reasoning about
-ordered pairs.  The second subgoal is to verify that \isa{f\ \isasymunion \ g} is a function, since
-\isa{Pi(?A,?B)} denotes a dependent function space.
-\begin{isabelle}
-\isacommand{apply}\ (rule\ apply\_equality)\isanewline
-\ 1.\ \isasymlbrakk a\ \isasymin \ A;\ f\ \isasymin \ A\ \isasymrightarrow \ B;\ g\ \isasymin \ C\ \isasymrightarrow \ D;\ A\ \isasyminter \ C\ =\ 0\isasymrbrakk \isanewline
-\isaindent{\ 1.\ }\isasymLongrightarrow \ \isasymlangle a,\ f\ `\ a\isasymrangle \ \isasymin \ f\ \isasymunion \ g\isanewline
-\ 2.\ \isasymlbrakk a\ \isasymin \ A;\ f\ \isasymin \ A\ \isasymrightarrow \ B;\ g\ \isasymin \ C\ \isasymrightarrow \ D;\ A\ \isasyminter \ C\ =\ 0\isasymrbrakk \isanewline
-\isaindent{\ 2.\ }\isasymLongrightarrow \ f\ \isasymunion \ g\ \isasymin \ Pi(?A,\ ?B)
-\end{isabelle}
-We must show that the pair belongs to~$f$ or~$g$; by~\tdx{UnI1} we
-choose~$f$:
-\begin{isabelle}
-\isacommand{apply}\ (rule\ UnI1)\isanewline
-\ 1.\ \isasymlbrakk a\ \isasymin \ A;\ f\ \isasymin \ A\ \isasymrightarrow \ B;\ g\ \isasymin \ C\ \isasymrightarrow \ D;\ A\ \isasyminter \ C\ =\ 0\isasymrbrakk \ \isasymLongrightarrow \ \isasymlangle a,\ f\ `\ a\isasymrangle \ \isasymin \ f\isanewline
-\ 2.\ \isasymlbrakk a\ \isasymin \ A;\ f\ \isasymin \ A\ \isasymrightarrow \ B;\ g\ \isasymin \ C\ \isasymrightarrow \ D;\ A\ \isasyminter \ C\ =\ 0\isasymrbrakk \isanewline
-\isaindent{\ 2.\ }\isasymLongrightarrow \ f\ \isasymunion \ g\ \isasymin \ Pi(?A,\ ?B)
-\end{isabelle}
-To show $\pair{a,f`a}\in f$ we use \tdx{apply_Pair}, which is
-essentially the converse of \tdx{apply_equality}:
-\begin{isabelle}
-\isacommand{apply}\ (rule\ apply\_Pair)\isanewline
-\ 1.\ \isasymlbrakk a\ \isasymin \ A;\ f\ \isasymin \ A\ \isasymrightarrow \ B;\ g\ \isasymin \ C\ \isasymrightarrow \ D;\ A\ \isasyminter \ C\ =\ 0\isasymrbrakk \ \isasymLongrightarrow \ f\ \isasymin \ Pi(?A2,?B2)\isanewline
-\ 2.\ \isasymlbrakk a\ \isasymin \ A;\ f\ \isasymin \ A\ \isasymrightarrow \ B;\ g\ \isasymin \ C\ \isasymrightarrow \ D;\ A\ \isasyminter \ C\ =\ 0\isasymrbrakk \ \isasymLongrightarrow \ a\ \isasymin \ ?A2\isanewline
-\ 3.\ \isasymlbrakk a\ \isasymin \ A;\ f\ \isasymin \ A\ \isasymrightarrow \ B;\ g\ \isasymin \ C\ \isasymrightarrow \ D;\ A\ \isasyminter \ C\ =\ 0\isasymrbrakk \isanewline
-\isaindent{\ 3.\ }\isasymLongrightarrow \ f\ \isasymunion \ g\ \isasymin \ Pi(?A,\ ?B)
-\end{isabelle}
-Using the assumptions $f\in A\to B$ and $a\in A$, we solve the two subgoals
-from \tdx{apply_Pair}.  Recall that a $\Pi$-set is merely a generalized
-function space, and observe that~{\tt?A2} gets instantiated to~\isa{A}.
-\begin{isabelle}
-\isacommand{apply}\ assumption\ \isanewline
-\ 1.\ \isasymlbrakk a\ \isasymin \ A;\ f\ \isasymin \ A\ \isasymrightarrow \ B;\ g\ \isasymin \ C\ \isasymrightarrow \ D;\ A\ \isasyminter \ C\ =\ 0\isasymrbrakk \ \isasymLongrightarrow \ a\ \isasymin \ A\isanewline
-\ 2.\ \isasymlbrakk a\ \isasymin \ A;\ f\ \isasymin \ A\ \isasymrightarrow \ B;\ g\ \isasymin \ C\ \isasymrightarrow \ D;\ A\ \isasyminter \ C\ =\ 0\isasymrbrakk \isanewline
-\isaindent{\ 2.\ }\isasymLongrightarrow \ f\ \isasymunion \ g\ \isasymin \ Pi(?A,\ ?B)
-\isanewline
-\isacommand{apply}\ assumption\ \isanewline
-\ 1.\ \isasymlbrakk a\ \isasymin \ A;\ f\ \isasymin \ A\ \isasymrightarrow \ B;\ g\ \isasymin \ C\ \isasymrightarrow \ D;\ A\ \isasyminter \ C\ =\ 0\isasymrbrakk \isanewline
-\isaindent{\ 1.\ }\isasymLongrightarrow \ f\ \isasymunion \ g\ \isasymin \ Pi(?A,\ ?B)
-\end{isabelle}
-To construct functions of the form $f\un g$, we apply
-\tdx{fun_disjoint_Un}:
-\begin{isabelle}
-\isacommand{apply}\ (rule\ fun\_disjoint\_Un)\isanewline
-\ 1.\ \isasymlbrakk a\ \isasymin \ A;\ f\ \isasymin \ A\ \isasymrightarrow \ B;\ g\ \isasymin \ C\ \isasymrightarrow \ D;\ A\ \isasyminter \ C\ =\ 0\isasymrbrakk \ \isasymLongrightarrow \ f\ \isasymin \ ?A3\ \isasymrightarrow \ ?B3\isanewline
-\ 2.\ \isasymlbrakk a\ \isasymin \ A;\ f\ \isasymin \ A\ \isasymrightarrow \ B;\ g\ \isasymin \ C\ \isasymrightarrow \ D;\ A\ \isasyminter \ C\ =\ 0\isasymrbrakk \ \isasymLongrightarrow \ g\ \isasymin \ ?C3\ \isasymrightarrow \ ?D3\isanewline
-\ 3.\ \isasymlbrakk a\ \isasymin \ A;\ f\ \isasymin \ A\ \isasymrightarrow \ B;\ g\ \isasymin \ C\ \isasymrightarrow \ D;\ A\ \isasyminter \ C\ =\ 0\isasymrbrakk \ \isasymLongrightarrow \ ?A3\ \isasyminter \ ?C3\ =\ 0
-\end{isabelle}
-The remaining subgoals are instances of the assumptions.  Again, observe how
-unknowns become instantiated:
-\begin{isabelle}
-\isacommand{apply}\ assumption\ \isanewline
-\ 1.\ \isasymlbrakk a\ \isasymin \ A;\ f\ \isasymin \ A\ \isasymrightarrow \ B;\ g\ \isasymin \ C\ \isasymrightarrow \ D;\ A\ \isasyminter \ C\ =\ 0\isasymrbrakk \ \isasymLongrightarrow \ g\ \isasymin \ ?C3\ \isasymrightarrow \ ?D3\isanewline
-\ 2.\ \isasymlbrakk a\ \isasymin \ A;\ f\ \isasymin \ A\ \isasymrightarrow \ B;\ g\ \isasymin \ C\ \isasymrightarrow \ D;\ A\ \isasyminter \ C\ =\ 0\isasymrbrakk \ \isasymLongrightarrow \ A\ \isasyminter \ ?C3\ =\ 0
-\isanewline
-\isacommand{apply}\ assumption\ \isanewline
-\ 1.\ \isasymlbrakk a\ \isasymin \ A;\ f\ \isasymin \ A\ \isasymrightarrow \ B;\ g\ \isasymin \ C\ \isasymrightarrow \ D;\ A\ \isasyminter \ C\ =\ 0\isasymrbrakk \ \isasymLongrightarrow \ A\ \isasyminter \ C\ =\ 0
-\isanewline
-\isacommand{apply}\ assumption\ \isanewline
-No\ subgoals!\isanewline
-\isacommand{done}
-\end{isabelle}
-See the theories \isa{ZF/func.thy} and \isa{ZF/WF.thy} for more
-examples of reasoning about functions.
-
-\index{set theory|)}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/doc-src/ZF/document/FOL.tex	Mon Aug 27 21:46:00 2012 +0200
@@ -0,0 +1,855 @@
+%!TEX root = logics-ZF.tex
+\chapter{First-Order Logic}
+\index{first-order logic|(}
+
+Isabelle implements Gentzen's natural deduction systems {\sc nj} and {\sc
+  nk}.  Intuitionistic first-order logic is defined first, as theory
+\thydx{IFOL}.  Classical logic, theory \thydx{FOL}, is
+obtained by adding the double negation rule.  Basic proof procedures are
+provided.  The intuitionistic prover works with derived rules to simplify
+implications in the assumptions.  Classical~\texttt{FOL} employs Isabelle's
+classical reasoner, which simulates a sequent calculus.
+
+\section{Syntax and rules of inference}
+The logic is many-sorted, using Isabelle's type classes.  The class of
+first-order terms is called \cldx{term} and is a subclass of \isa{logic}.
+No types of individuals are provided, but extensions can define types such
+as \isa{nat::term} and type constructors such as \isa{list::(term)term}
+(see the examples directory, \texttt{FOL/ex}).  Below, the type variable
+$\alpha$ ranges over class \isa{term}; the equality symbol and quantifiers
+are polymorphic (many-sorted).  The type of formulae is~\tydx{o}, which
+belongs to class~\cldx{logic}.  Figure~\ref{fol-syntax} gives the syntax.
+Note that $a$\verb|~=|$b$ is translated to $\neg(a=b)$.
+
+Figure~\ref{fol-rules} shows the inference rules with their names.
+Negation is defined in the usual way for intuitionistic logic; $\neg P$
+abbreviates $P\imp\bot$.  The biconditional~($\bimp$) is defined through
+$\conj$ and~$\imp$; introduction and elimination rules are derived for it.
+
+The unique existence quantifier, $\exists!x.P(x)$, is defined in terms
+of~$\exists$ and~$\forall$.  An Isabelle binder, it admits nested
+quantifications.  For instance, $\exists!x\;y.P(x,y)$ abbreviates
+$\exists!x. \exists!y.P(x,y)$; note that this does not mean that there
+exists a unique pair $(x,y)$ satisfying~$P(x,y)$.
+
+Some intuitionistic derived rules are shown in
+Fig.\ts\ref{fol-int-derived}, again with their names.  These include
+rules for the defined symbols $\neg$, $\bimp$ and $\exists!$.  Natural
+deduction typically involves a combination of forward and backward
+reasoning, particularly with the destruction rules $(\conj E)$,
+$({\imp}E)$, and~$(\forall E)$.  Isabelle's backward style handles these
+rules badly, so sequent-style rules are derived to eliminate conjunctions,
+implications, and universal quantifiers.  Used with elim-resolution,
+\tdx{allE} eliminates a universal quantifier while \tdx{all_dupE}
+re-inserts the quantified formula for later use.  The rules
+\isa{conj\_impE}, etc., support the intuitionistic proof procedure
+(see~\S\ref{fol-int-prover}).
+
+See the on-line theory library for complete listings of the rules and
+derived rules.
+
+\begin{figure} 
+\begin{center}
+\begin{tabular}{rrr} 
+  \it name      &\it meta-type  & \it description \\ 
+  \cdx{Trueprop}& $o\To prop$           & coercion to $prop$\\
+  \cdx{Not}     & $o\To o$              & negation ($\neg$) \\
+  \cdx{True}    & $o$                   & tautology ($\top$) \\
+  \cdx{False}   & $o$                   & absurdity ($\bot$)
+\end{tabular}
+\end{center}
+\subcaption{Constants}
+
+\begin{center}
+\begin{tabular}{llrrr} 
+  \it symbol &\it name     &\it meta-type & \it priority & \it description \\
+  \sdx{ALL}  & \cdx{All}  & $(\alpha\To o)\To o$ & 10 & 
+        universal quantifier ($\forall$) \\
+  \sdx{EX}   & \cdx{Ex}   & $(\alpha\To o)\To o$ & 10 & 
+        existential quantifier ($\exists$) \\
+  \isa{EX!}  & \cdx{Ex1}  & $(\alpha\To o)\To o$ & 10 & 
+        unique existence ($\exists!$)
+\end{tabular}
+\index{*"E"X"! symbol}
+\end{center}
+\subcaption{Binders} 
+
+\begin{center}
+\index{*"= symbol}
+\index{&@{\tt\&} symbol}
+\index{"!@{\tt\char124} symbol} %\char124 is vertical bar. We use ! because | stopped working
+\index{*"-"-"> symbol}
+\index{*"<"-"> symbol}
+\begin{tabular}{rrrr} 
+  \it symbol    & \it meta-type         & \it priority & \it description \\ 
+  \tt =         & $[\alpha,\alpha]\To o$ & Left 50 & equality ($=$) \\
+  \tt \&        & $[o,o]\To o$          & Right 35 & conjunction ($\conj$) \\
+  \tt |         & $[o,o]\To o$          & Right 30 & disjunction ($\disj$) \\
+  \tt -->       & $[o,o]\To o$          & Right 25 & implication ($\imp$) \\
+  \tt <->       & $[o,o]\To o$          & Right 25 & biconditional ($\bimp$) 
+\end{tabular}
+\end{center}
+\subcaption{Infixes}
+
+\dquotes
+\[\begin{array}{rcl}
+ formula & = & \hbox{expression of type~$o$} \\
+         & | & term " = " term \quad| \quad term " \ttilde= " term \\
+         & | & "\ttilde\ " formula \\
+         & | & formula " \& " formula \\
+         & | & formula " | " formula \\
+         & | & formula " --> " formula \\
+         & | & formula " <-> " formula \\
+         & | & "ALL~" id~id^* " . " formula \\
+         & | & "EX~~" id~id^* " . " formula \\
+         & | & "EX!~" id~id^* " . " formula
+  \end{array}
+\]
+\subcaption{Grammar}
+\caption{Syntax of \texttt{FOL}} \label{fol-syntax}
+\end{figure}
+
+
+\begin{figure} 
+\begin{ttbox}
+\tdx{refl}        a=a
+\tdx{subst}       [| a=b;  P(a) |] ==> P(b)
+\subcaption{Equality rules}
+
+\tdx{conjI}       [| P;  Q |] ==> P&Q
+\tdx{conjunct1}   P&Q ==> P
+\tdx{conjunct2}   P&Q ==> Q
+
+\tdx{disjI1}      P ==> P|Q
+\tdx{disjI2}      Q ==> P|Q
+\tdx{disjE}       [| P|Q;  P ==> R;  Q ==> R |] ==> R
+
+\tdx{impI}        (P ==> Q) ==> P-->Q
+\tdx{mp}          [| P-->Q;  P |] ==> Q
+
+\tdx{FalseE}      False ==> P
+\subcaption{Propositional rules}
+
+\tdx{allI}        (!!x. P(x))  ==> (ALL x.P(x))
+\tdx{spec}        (ALL x.P(x)) ==> P(x)
+
+\tdx{exI}         P(x) ==> (EX x.P(x))
+\tdx{exE}         [| EX x.P(x);  !!x. P(x) ==> R |] ==> R
+\subcaption{Quantifier rules}
+
+\tdx{True_def}    True        == False-->False
+\tdx{not_def}     ~P          == P-->False
+\tdx{iff_def}     P<->Q       == (P-->Q) & (Q-->P)
+\tdx{ex1_def}     EX! x. P(x) == EX x. P(x) & (ALL y. P(y) --> y=x)
+\subcaption{Definitions}
+\end{ttbox}
+
+\caption{Rules of intuitionistic logic} \label{fol-rules}
+\end{figure}
+
+
+\begin{figure} 
+\begin{ttbox}
+\tdx{sym}       a=b ==> b=a
+\tdx{trans}     [| a=b;  b=c |] ==> a=c
+\tdx{ssubst}    [| b=a;  P(a) |] ==> P(b)
+\subcaption{Derived equality rules}
+
+\tdx{TrueI}     True
+
+\tdx{notI}      (P ==> False) ==> ~P
+\tdx{notE}      [| ~P;  P |] ==> R
+
+\tdx{iffI}      [| P ==> Q;  Q ==> P |] ==> P<->Q
+\tdx{iffE}      [| P <-> Q;  [| P-->Q; Q-->P |] ==> R |] ==> R
+\tdx{iffD1}     [| P <-> Q;  P |] ==> Q            
+\tdx{iffD2}     [| P <-> Q;  Q |] ==> P
+
+\tdx{ex1I}      [| P(a);  !!x. P(x) ==> x=a |]  ==>  EX! x. P(x)
+\tdx{ex1E}      [| EX! x.P(x);  !!x.[| P(x);  ALL y. P(y) --> y=x |] ==> R 
+          |] ==> R
+\subcaption{Derived rules for \(\top\), \(\neg\), \(\bimp\) and \(\exists!\)}
+
+\tdx{conjE}     [| P&Q;  [| P; Q |] ==> R |] ==> R
+\tdx{impE}      [| P-->Q;  P;  Q ==> R |] ==> R
+\tdx{allE}      [| ALL x.P(x);  P(x) ==> R |] ==> R
+\tdx{all_dupE}  [| ALL x.P(x);  [| P(x); ALL x.P(x) |] ==> R |] ==> R
+\subcaption{Sequent-style elimination rules}
+
+\tdx{conj_impE} [| (P&Q)-->S;  P-->(Q-->S) ==> R |] ==> R
+\tdx{disj_impE} [| (P|Q)-->S;  [| P-->S; Q-->S |] ==> R |] ==> R
+\tdx{imp_impE}  [| (P-->Q)-->S;  [| P; Q-->S |] ==> Q;  S ==> R |] ==> R
+\tdx{not_impE}  [| ~P --> S;  P ==> False;  S ==> R |] ==> R
+\tdx{iff_impE}  [| (P<->Q)-->S; [| P; Q-->S |] ==> Q; [| Q; P-->S |] ==> P;
+             S ==> R |] ==> R
+\tdx{all_impE}  [| (ALL x.P(x))-->S;  !!x.P(x);  S ==> R |] ==> R
+\tdx{ex_impE}   [| (EX x.P(x))-->S;  P(a)-->S ==> R |] ==> R
+\end{ttbox}
+\subcaption{Intuitionistic simplification of implication}
+\caption{Derived rules for intuitionistic logic} \label{fol-int-derived}
+\end{figure}
+
+
+\section{Generic packages}
+FOL instantiates most of Isabelle's generic packages.
+\begin{itemize}
+\item 
+It instantiates the simplifier, which is invoked through the method 
+\isa{simp}.  Both equality ($=$) and the biconditional
+($\bimp$) may be used for rewriting; a small example follows this list.  
+
+\item 
+It instantiates the classical reasoner, which is invoked mainly through the 
+methods \isa{blast} and \isa{auto}.  See~\S\ref{fol-cla-prover}
+for details. 
+%
+%\item FOL provides the tactic \ttindex{hyp_subst_tac}, which substitutes for
+%  an equality throughout a subgoal and its hypotheses.  This tactic uses FOL's
+%  general substitution rule.
+\end{itemize}
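+
+Here is a small, contrived example of the first point: the premise $P\bimp Q$
+below is used by \isa{simp} as a rewrite rule, after which the goal is closed
+by reflexivity.
+\begin{isabelle}
+\isacommand{lemma}\ "P<->Q\ ==>\ (P\ \&\ R)\ <->\ (Q\ \&\ R)"\isanewline
+\isacommand{by}\ simp
+\end{isabelle}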
+
+\begin{warn}\index{simplification!of conjunctions}%
+  Simplifying $a=b\conj P(a)$ to $a=b\conj P(b)$ is often advantageous.  The
+  left part of a conjunction helps in simplifying the right part.  This effect
+  is not available by default because it can be slow.  It can be obtained by
+  including the theorem \isa{conj_cong}\index{*conj_cong (rule)}%
+  as a congruence rule in
+  simplification, as in \isa{simp cong:\ conj\_cong}.
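+  For instance, a goal such as the following should then be provable by
+  simplification alone, the left conjunct $a=b$ being used to rewrite the
+  right one:
+\begin{isabelle}
+\isacommand{lemma}\ "(a\ =\ b\ \&\ P(a))\ <->\ (a\ =\ b\ \&\ P(b))"\isanewline
+\isacommand{by}\ (simp\ cong:\ conj\_cong)
+\end{isabelle}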
+\end{warn}
+
+
+\section{Intuitionistic proof procedures} \label{fol-int-prover}
+Implication elimination (the rules~\isa{mp} and~\isa{impE}) pose
+difficulties for automated proof.  In intuitionistic logic, the assumption
+$P\imp Q$ cannot be treated like $\neg P\disj Q$.  Given $P\imp Q$, we may
+use~$Q$ provided we can prove~$P$; the proof of~$P$ may require repeated
+use of $P\imp Q$.  If the proof of~$P$ fails then the whole branch of the
+proof must be abandoned.  Thus intuitionistic propositional logic requires
+backtracking.  
+
+For an elementary example, consider the intuitionistic proof of $Q$ from
+$P\imp Q$ and $(P\imp Q)\imp P$.  The implication $P\imp Q$ is needed
+twice:
+\[ \infer[({\imp}E)]{Q}{P\imp Q &
+       \infer[({\imp}E)]{P}{(P\imp Q)\imp P & P\imp Q}} 
+\]
+The theorem prover for intuitionistic logic does not use~\isa{impE}.\@
+Instead, it simplifies implications using derived rules
+(Fig.\ts\ref{fol-int-derived}).  It reduces the antecedents of implications
+to atoms and then uses Modus Ponens: from $P\imp Q$ and~$P$ deduce~$Q$.
+The rules \tdx{conj_impE} and \tdx{disj_impE} are 
+straightforward: $(P\conj Q)\imp S$ is equivalent to $P\imp (Q\imp S)$, and
+$(P\disj Q)\imp S$ is equivalent to the conjunction of $P\imp S$ and $Q\imp
+S$.  The other \ldots\isa{\_impE} rules are unsafe; the method requires
+backtracking.  All the rules are derived in the same simple manner.
+
+Dyckhoff has independently discovered similar rules, and (more importantly)
+has demonstrated their completeness for propositional
+logic~\cite{dyckhoff}.  However, the tactics given below are not complete
+for first-order logic because they discard universally quantified
+assumptions after a single use. These are \ML{} functions, and are listed
+below with their \ML{} type:
+\begin{ttbox} 
+mp_tac              : int -> tactic
+eq_mp_tac           : int -> tactic
+IntPr.safe_step_tac : int -> tactic
+IntPr.safe_tac      :        tactic
+IntPr.inst_step_tac : int -> tactic
+IntPr.step_tac      : int -> tactic
+IntPr.fast_tac      : int -> tactic
+IntPr.best_tac      : int -> tactic
+\end{ttbox}
+Most of these belong to the \ML{} structure \texttt{IntPr} and resemble the
+tactics of Isabelle's classical reasoner.  There are no corresponding
+Isar methods, but you can use the 
+\isa{tactic} method to call \ML{} tactics in an Isar script:
+\begin{isabelle}
+\isacommand{apply}\ (tactic\ {* IntPr.fast\_tac 1*})
+\end{isabelle}
+Here is a description of each tactic:
+
+\begin{ttdescription}
+\item[\ttindexbold{mp_tac} {\it i}] 
+attempts to use \tdx{notE} or \tdx{impE} within the assumptions in
+subgoal $i$.  For each assumption of the form $\neg P$ or $P\imp Q$, it
+searches for another assumption unifiable with~$P$.  By
+contradiction with $\neg P$ it can solve the subgoal completely; by Modus
+Ponens it can replace the assumption $P\imp Q$ by $Q$.  The tactic can
+produce multiple outcomes, enumerating all suitable pairs of assumptions.
+
+\item[\ttindexbold{eq_mp_tac} {\it i}] 
+is like \texttt{mp_tac} {\it i}, but may not instantiate unknowns --- thus, it
+is safe.
+
+\item[\ttindexbold{IntPr.safe_step_tac} $i$] performs a safe step on
+subgoal~$i$.  This may include proof by assumption or Modus Ponens (taking
+care not to instantiate unknowns), or \texttt{hyp_subst_tac}. 
+
+\item[\ttindexbold{IntPr.safe_tac}] repeatedly performs safe steps on all 
+subgoals.  It is deterministic, with at most one outcome.
+
+\item[\ttindexbold{IntPr.inst_step_tac} $i$] is like \texttt{safe_step_tac},
+but allows unknowns to be instantiated.
+
+\item[\ttindexbold{IntPr.step_tac} $i$] tries \texttt{safe_tac} or {\tt
+    inst_step_tac}, or applies an unsafe rule.  This is the basic step of
+  the intuitionistic proof procedure.
+
+\item[\ttindexbold{IntPr.fast_tac} $i$] applies \texttt{step_tac}, using
+depth-first search, to solve subgoal~$i$.
+
+\item[\ttindexbold{IntPr.best_tac} $i$] applies \texttt{step_tac}, using
+best-first search (guided by the size of the proof state) to solve subgoal~$i$.
+\end{ttdescription}
+Here are some of the theorems that \texttt{IntPr.fast_tac} proves
+automatically.  The latter three date from {\it Principia Mathematica}
+(*11.53, *11.55, *11.61)~\cite{principia}.
+\begin{ttbox}
+~~P & ~~(P --> Q) --> ~~Q
+(ALL x y. P(x) --> Q(y)) <-> ((EX x. P(x)) --> (ALL y. Q(y)))
+(EX x y. P(x) & Q(x,y)) <-> (EX x. P(x) & (EX y. Q(x,y)))
+(EX y. ALL x. P(x) --> Q(x,y)) --> (ALL x. P(x) --> (EX y. Q(x,y)))
+\end{ttbox}
+
+
+
+\begin{figure} 
+\begin{ttbox}
+\tdx{excluded_middle}    ~P | P
+
+\tdx{disjCI}    (~Q ==> P) ==> P|Q
+\tdx{exCI}      (ALL x. ~P(x) ==> P(a)) ==> EX x.P(x)
+\tdx{impCE}     [| P-->Q; ~P ==> R; Q ==> R |] ==> R
+\tdx{iffCE}     [| P<->Q;  [| P; Q |] ==> R;  [| ~P; ~Q |] ==> R |] ==> R
+\tdx{notnotD}   ~~P ==> P
+\tdx{swap}      ~P ==> (~Q ==> P) ==> Q
+\end{ttbox}
+\caption{Derived rules for classical logic} \label{fol-cla-derived}
+\end{figure}
+
+
+\section{Classical proof procedures} \label{fol-cla-prover}
+The classical theory, \thydx{FOL}, consists of intuitionistic logic plus
+the rule
+$$ \vcenter{\infer{P}{\infer*{P}{[\neg P]}}} \eqno(classical) $$
+\noindent
+Natural deduction in classical logic is not really all that natural.  FOL
+derives classical introduction rules for $\disj$ and~$\exists$, as well as
+classical elimination rules for~$\imp$ and~$\bimp$, and the swap rule (see
+Fig.\ts\ref{fol-cla-derived}).
+
+The classical reasoner is installed.  The most useful methods are
+\isa{blast} (pure classical reasoning) and \isa{auto} (classical reasoning
+combined with simplification), but the full range of
+methods is available, including \isa{clarify},
+\isa{fast}, \isa{best} and \isa{force}. 
+ See 
+\iflabelundefined{chap:classical}{the {\em Reference Manual\/}}%
+        {Chap.\ts\ref{chap:classical}} 
+and the \emph{Tutorial}~\cite{isa-tutorial}
+for more discussion of classical proof methods.
+
+
+\section{An intuitionistic example}
+Here is a session similar to one in the book {\em Logic and Computation}
+\cite[pages~222--3]{paulson87}. It illustrates the treatment of
+quantifier reasoning, which is different in Isabelle than it is in
+{\sc lcf}-based theorem provers such as {\sc hol}.  
+
+The theory header specifies that we are working in intuitionistic
+logic by designating \isa{IFOL} rather than \isa{FOL} as the parent
+theory:
+\begin{isabelle}
+\isacommand{theory}\ IFOL\_examples\ \isacommand{imports}\ IFOL\isanewline
+\isacommand{begin}
+\end{isabelle}
+The proof begins by entering the goal, then applying the rule $({\imp}I)$.
+\begin{isabelle}
+\isacommand{lemma}\ "(EX\ y.\ ALL\ x.\ Q(x,y))\ -->\ \ (ALL\ x.\ EX\ y.\ Q(x,y))"\isanewline
+\ 1.\ (\isasymexists y.\ \isasymforall x.\ Q(x,\ y))\
+\isasymlongrightarrow \ (\isasymforall x.\ \isasymexists y.\ Q(x,\ y))
+\isanewline
+\isacommand{apply}\ (rule\ impI)\isanewline
+\ 1.\ \isasymexists y.\ \isasymforall x.\ Q(x,\ y)\
+\isasymLongrightarrow \ \isasymforall x.\ \isasymexists y.\ Q(x,\ y)
+\end{isabelle}
+Isabelle's output is shown as it would appear using Proof General.
+In this example, we shall never have more than one subgoal.  Applying
+$({\imp}I)$ replaces~\isa{\isasymlongrightarrow}
+by~\isa{\isasymLongrightarrow}, so that
+\(\ex{y}\all{x}Q(x,y)\) becomes an assumption.  We have the choice of
+$({\exists}E)$ and $({\forall}I)$; let us try the latter.
+\begin{isabelle}
+\isacommand{apply}\ (rule\ allI)\isanewline
+\ 1.\ \isasymAnd x.\ \isasymexists y.\ \isasymforall x.\ Q(x,\ y)\
+\isasymLongrightarrow \ \isasymexists y.\ Q(x,\ y)\hfill\((*)\)
+\end{isabelle}
+Applying $({\forall}I)$ replaces the \isa{\isasymforall
+x} (in ASCII, \isa{ALL~x}) by \isa{\isasymAnd x}
+(or \isa{!!x}), replacing FOL's universal quantifier by Isabelle's 
+meta universal quantifier.  The bound variable is a {\bf parameter} of
+the subgoal.  We now must choose between $({\exists}I)$ and
+$({\exists}E)$.  What happens if the wrong rule is chosen?
+\begin{isabelle}
+\isacommand{apply}\ (rule\ exI)\isanewline
+\ 1.\ \isasymAnd x.\ \isasymexists y.\ \isasymforall x.\ Q(x,\ y)\
+\isasymLongrightarrow \ Q(x,\ ?y2(x))
+\end{isabelle}
+The new subgoal~1 contains the function variable \isa{?y2}.  Instantiating
+\isa{?y2} can replace~\isa{?y2(x)} by a term containing~\isa{x}, even
+though~\isa{x} is a bound variable.  Now we analyse the assumption
+\(\exists y.\forall x. Q(x,y)\) using elimination rules:
+\begin{isabelle}
+\isacommand{apply}\ (erule\ exE)\isanewline
+\ 1.\ \isasymAnd x\ y.\ \isasymforall x.\ Q(x,\ y)\ \isasymLongrightarrow \ Q(x,\ ?y2(x))
+\end{isabelle}
+Applying $(\exists E)$ has produced the parameter \isa{y} and stripped the
+existential quantifier from the assumption.  But the subgoal is unprovable:
+there is no way to unify \isa{?y2(x)} with the bound variable~\isa{y}.
+Using Proof General, we can return to the critical point, marked
+$(*)$ above.  This time we apply $({\exists}E)$:
+\begin{isabelle}
+\isacommand{apply}\ (erule\ exE)\isanewline
+\ 1.\ \isasymAnd x\ y.\ \isasymforall x.\ Q(x,\ y)\
+\isasymLongrightarrow \ \isasymexists y.\ Q(x,\ y)
+\end{isabelle}
+We now have two parameters and no scheme variables.  Applying
+$({\exists}I)$ and $({\forall}E)$ produces two scheme variables, which are
+applied to those parameters.  Parameters should be produced early, as this
+example demonstrates.
+\begin{isabelle}
+\isacommand{apply}\ (rule\ exI)\isanewline
+\ 1.\ \isasymAnd x\ y.\ \isasymforall x.\ Q(x,\ y)\
+\isasymLongrightarrow \ Q(x,\ ?y3(x,\ y))
+\isanewline
+\isacommand{apply}\ (erule\ allE)\isanewline
+\ 1.\ \isasymAnd x\ y.\ Q(?x4(x,\ y),\ y)\ \isasymLongrightarrow \
+Q(x,\ ?y3(x,\ y))
+\end{isabelle}
+The subgoal has variables \isa{?y3} and \isa{?x4} applied to both
+parameters.  The obvious projection functions unify \isa{?x4(x,y)} with~\isa{
+x} and \isa{?y3(x,y)} with~\isa{y}.
+\begin{isabelle}
+\isacommand{apply}\ assumption\isanewline
+No\ subgoals!\isanewline
+\isacommand{done}
+\end{isabelle}
+The theorem was proved in six method invocations, not counting the
+abandoned ones.  But proof checking is tedious, and the \ML{} tactic
+\ttindex{IntPr.fast_tac} can prove the theorem in one step.
+\begin{isabelle}
+\isacommand{lemma}\ "(EX\ y.\ ALL\ x.\ Q(x,y))\ -->\ \ (ALL\ x.\ EX\ y.\ Q(x,y))"\isanewline
+\ 1.\ (\isasymexists y.\ \isasymforall x.\ Q(x,\ y))\
+\isasymlongrightarrow \ (\isasymforall x.\ \isasymexists y.\ Q(x,\ y))
+\isanewline
+\isacommand{by} (tactic \ttlbrace*IntPr.fast_tac 1*\ttrbrace)\isanewline
+No\ subgoals!
+\end{isabelle}
+
+
+\section{An example of intuitionistic negation}
+The following example demonstrates the specialized forms of implication
+elimination.  Even propositional formulae can be difficult to prove from
+the basic rules; the specialized rules help considerably.  
+
+Propositional examples are easy to invent.  As Dummett notes~\cite[page
+28]{dummett}, $\neg P$ is classically provable if and only if it is
+intuitionistically provable;  therefore, $P$ is classically provable if and
+only if $\neg\neg P$ is intuitionistically provable.%
+\footnote{This remark holds only for propositional logic, not if $P$ is
+  allowed to contain quantifiers.}
+%
+Proving $\neg\neg P$ intuitionistically is
+much harder than proving~$P$ classically.
+
+Our example is the double negation of the classical tautology $(P\imp
+Q)\disj (Q\imp P)$.  The first step is to apply the
+\isa{unfold} method, which expands
+negations to implications using the definition $\neg P\equiv P\imp\bot$
+and allows use of the special implication rules.
+\begin{isabelle}
+\isacommand{lemma}\ "\isachartilde \ \isachartilde \ ((P-->Q)\ |\ (Q-->P))"\isanewline
+\ 1.\ \isasymnot \ \isasymnot \ ((P\ \isasymlongrightarrow \ Q)\ \isasymor \ (Q\ \isasymlongrightarrow \ P))
+\isanewline
+\isacommand{apply}\ (unfold\ not\_def)\isanewline
+\ 1.\ ((P\ \isasymlongrightarrow \ Q)\ \isasymor \ (Q\ \isasymlongrightarrow \ P)\ \isasymlongrightarrow \ False)\ \isasymlongrightarrow \ False%
+\end{isabelle}
+The next step is a trivial use of $(\imp I)$.
+\begin{isabelle}
+\isacommand{apply}\ (rule\ impI)\isanewline
+\ 1.\ (P\ \isasymlongrightarrow \ Q)\ \isasymor \ (Q\ \isasymlongrightarrow \ P)\ \isasymlongrightarrow \ False\ \isasymLongrightarrow \ False%
+\end{isabelle}
+By $(\imp E)$ it would suffice to prove $(P\imp Q)\disj (Q\imp P)$, but
+that formula is not a theorem of intuitionistic logic.  Instead, we
+apply the specialized implication rule \tdx{disj_impE}.  It splits the
+assumption into two assumptions, one for each disjunct.
+\begin{isabelle}
+\isacommand{apply}\ (erule\ disj\_impE)\isanewline
+\ 1.\ \isasymlbrakk (P\ \isasymlongrightarrow \ Q)\ \isasymlongrightarrow \ False;\ (Q\ \isasymlongrightarrow \ P)\ \isasymlongrightarrow \ False\isasymrbrakk \ \isasymLongrightarrow \
+False
+\end{isabelle}
+We cannot hope to prove $P\imp Q$ or $Q\imp P$ separately, but
+their negations are inconsistent.  Applying \tdx{imp_impE} breaks down
+the assumption $\neg(P\imp Q)$, asking to show~$Q$ while providing new
+assumptions~$P$ and~$\neg Q$.
+\begin{isabelle}
+\isacommand{apply}\ (erule\ imp\_impE)\isanewline
+\ 1.\ \isasymlbrakk (Q\ \isasymlongrightarrow \ P)\ \isasymlongrightarrow \ False;\ P;\ Q\ \isasymlongrightarrow \ False\isasymrbrakk \ \isasymLongrightarrow \ Q\isanewline
+\ 2.\ \isasymlbrakk (Q\ \isasymlongrightarrow \ P)\ \isasymlongrightarrow \ False;\ False\isasymrbrakk \ \isasymLongrightarrow \
+False
+\end{isabelle}
+Subgoal~2 holds trivially; let us ignore it and continue working on
+subgoal~1.  Thanks to the assumption~$P$, we could prove $Q\imp P$;
+applying \tdx{imp_impE} is simpler.
+\begin{isabelle}
+\ \isacommand{apply}\ (erule\ imp\_impE)\isanewline
+\ 1.\ \isasymlbrakk P;\ Q\ \isasymlongrightarrow \ False;\ Q;\ P\ \isasymlongrightarrow \ False\isasymrbrakk \ \isasymLongrightarrow \ P\isanewline
+\ 2.\ \isasymlbrakk P;\ Q\ \isasymlongrightarrow \ False;\ False\isasymrbrakk \ \isasymLongrightarrow \ Q\isanewline
+\ 3.\ \isasymlbrakk (Q\ \isasymlongrightarrow \ P)\ \isasymlongrightarrow \ False;\ False\isasymrbrakk \ \isasymLongrightarrow \ False%
+\end{isabelle}
+The three subgoals are all trivial.
+\begin{isabelle}
+\isacommand{apply}\ assumption\ \isanewline
+\ 1.\ \isasymlbrakk P;\ Q\ \isasymlongrightarrow \ False;\
+False\isasymrbrakk \ \isasymLongrightarrow \ Q\isanewline
+\ 2.\ \isasymlbrakk (Q\ \isasymlongrightarrow \ P)\
+\isasymlongrightarrow \ False;\ False\isasymrbrakk \
+\isasymLongrightarrow \ False%
+\isanewline
+\isacommand{apply}\ (erule\ FalseE)+\isanewline
+No\ subgoals!\isanewline
+\isacommand{done}
+\end{isabelle}
+This proof is also trivial for the \ML{} tactic \isa{IntPr.fast_tac}.
+
+
+\section{A classical example} \label{fol-cla-example}
+To illustrate classical logic, we shall prove the theorem
+$\ex{y}\all{x}P(y)\imp P(x)$.  Informally, the theorem can be proved as
+follows.  Choose~$y$ such that~$\neg P(y)$, if such exists; otherwise
+$\all{x}P(x)$ is true.  Either way the theorem holds.  First, we must
+work in a theory based on classical logic, the theory \isa{FOL}:
+\begin{isabelle}
+\isacommand{theory}\ FOL\_examples\ \isacommand{imports}\ FOL\isanewline
+\isacommand{begin}
+\end{isabelle}
+
+The formal proof does not conform in any obvious way to the sketch given
+above.  Its key step is its first rule, \tdx{exCI}, a classical
+version of~$(\exists I)$ that allows multiple instantiation of the
+quantifier.
+\begin{isabelle}
+\isacommand{lemma}\ "EX\ y.\ ALL\ x.\ P(y)-->P(x)"\isanewline
+\ 1.\ \isasymexists y.\ \isasymforall x.\ P(y)\ \isasymlongrightarrow \ P(x)
+\isanewline
+\isacommand{apply}\ (rule\ exCI)\isanewline
+\ 1.\ \isasymforall y.\ \isasymnot \ (\isasymforall x.\ P(y)\ \isasymlongrightarrow \ P(x))\ \isasymLongrightarrow \ \isasymforall x.\ P(?a)\ \isasymlongrightarrow \ P(x)
+\end{isabelle}
+We can either exhibit a term \isa{?a} to satisfy the conclusion of
+subgoal~1, or produce a contradiction from the assumption.  The next
+steps are routine.
+\begin{isabelle}
+\isacommand{apply}\ (rule\ allI)\isanewline
+\ 1.\ \isasymAnd x.\ \isasymforall y.\ \isasymnot \ (\isasymforall x.\ P(y)\ \isasymlongrightarrow \ P(x))\ \isasymLongrightarrow \ P(?a)\ \isasymlongrightarrow \ P(x)
+\isanewline
+\isacommand{apply}\ (rule\ impI)\isanewline
+\ 1.\ \isasymAnd x.\ \isasymlbrakk \isasymforall y.\ \isasymnot \ (\isasymforall x.\ P(y)\ \isasymlongrightarrow \ P(x));\ P(?a)\isasymrbrakk \ \isasymLongrightarrow \ P(x)
+\end{isabelle}
+By the duality between $\exists$ and~$\forall$, applying~$(\forall E)$
+is equivalent to applying~$(\exists I)$ again.
+\begin{isabelle}
+\isacommand{apply}\ (erule\ allE)\isanewline
+\ 1.\ \isasymAnd x.\ \isasymlbrakk P(?a);\ \isasymnot \ (\isasymforall xa.\ P(?y3(x))\ \isasymlongrightarrow \ P(xa))\isasymrbrakk \ \isasymLongrightarrow \ P(x)
+\end{isabelle}
+In classical logic, a negated assumption is equivalent to a conclusion.  To
+get this effect, we create a swapped version of $(\forall I)$ and apply it
+using \ttindex{erule}.
+\begin{isabelle}
+\isacommand{apply}\ (erule\ allI\ [THEN\ [2]\ swap])\isanewline
+\ 1.\ \isasymAnd x\ xa.\ \isasymlbrakk P(?a);\ \isasymnot \ P(x)\isasymrbrakk \ \isasymLongrightarrow \ P(?y3(x))\ \isasymlongrightarrow \ P(xa)
+\end{isabelle}
+The operand of \isa{erule} above denotes the following theorem:
+\begin{isabelle}
+\ \ \ \ \isasymlbrakk \isasymnot \ (\isasymforall x.\ ?P1(x));\
+\isasymAnd x.\ \isasymnot \ ?P\ \isasymLongrightarrow \
+?P1(x)\isasymrbrakk \
+\isasymLongrightarrow \ ?P%
+\end{isabelle}
+The previous conclusion, \isa{P(x)}, has become a negated assumption.
+\begin{isabelle}
+\isacommand{apply}\ (rule\ impI)\isanewline
+\ 1.\ \isasymAnd x\ xa.\ \isasymlbrakk P(?a);\ \isasymnot \ P(x);\ P(?y3(x))\isasymrbrakk \ \isasymLongrightarrow \ P(xa)
+\end{isabelle}
+The subgoal has three assumptions.  We produce a contradiction between the
+\index{assumptions!contradictory} assumptions~\isa{\isasymnot P(x)}
+and~\isa{P(?y3(x))}.   The proof never instantiates the
+unknown~\isa{?a}.
+\begin{isabelle}
+\isacommand{apply}\ (erule\ notE)\isanewline
+\ 1.\ \isasymAnd x\ xa.\ \isasymlbrakk P(?a);\ P(?y3(x))\isasymrbrakk \ \isasymLongrightarrow \ P(x)
+\isanewline
+\isacommand{apply}\ assumption\isanewline
+No\ subgoals!\isanewline
+\isacommand{done}
+\end{isabelle}
+The civilised way to prove this theorem is using the
+\methdx{blast} method, which automatically uses the classical form
+of the rule~$(\exists I)$:
+\begin{isabelle}
+\isacommand{lemma}\ "EX\ y.\ ALL\ x.\ P(y)-->P(x)"\isanewline
+\ 1.\ \isasymexists y.\ \isasymforall x.\ P(y)\ \isasymlongrightarrow \ P(x)
+\isanewline
+\isacommand{by}\ blast\isanewline
+No\ subgoals!
+\end{isabelle}
+If this theorem seems counterintuitive, then perhaps you are an
+intuitionist.  In constructive logic, proving $\ex{y}\all{x}P(y)\imp P(x)$
+requires exhibiting a particular term~$t$ such that $\all{x}P(t)\imp P(x)$,
+which we cannot do without further knowledge about~$P$.
+
+
+\section{Derived rules and the classical tactics}
+Classical first-order logic can be extended with the propositional
+connective $if(P,Q,R)$, where 
+$$ if(P,Q,R) \equiv P\conj Q \disj \neg P \conj R. \eqno(if) $$
+Theorems about $if$ can be proved by treating this as an abbreviation,
+replacing $if(P,Q,R)$ by $P\conj Q \disj \neg P \conj R$ in subgoals.  But
+this duplicates~$P$, causing an exponential blowup and an unreadable
+formula.  Introducing further abbreviations makes the problem worse.
+
+Natural deduction demands rules that introduce and eliminate $if(P,Q,R)$
+directly, without reference to its definition.  The simple identity
+\[ if(P,Q,R) \,\bimp\, (P\imp Q)\conj (\neg P\imp R) \]
+suggests that the
+$if$-introduction rule should be
+\[ \infer[({if}\,I)]{if(P,Q,R)}{\infer*{Q}{[P]}  &  \infer*{R}{[\neg P]}} \]
+The $if$-elimination rule reflects the definition of $if(P,Q,R)$ and the
+elimination rules for~$\disj$ and~$\conj$.
+\[ \infer[({if}\,E)]{S}{if(P,Q,R) & \infer*{S}{[P,Q]}
+                                  & \infer*{S}{[\neg P,R]}} 
+\]
+Having made these plans, we get down to work with Isabelle.  The theory of
+classical logic, \texttt{FOL}, is extended with the constant
+$if::[o,o,o]\To o$.  The axiom \tdx{if_def} asserts the
+equation~$(if)$.
+\begin{isabelle}
+\isacommand{theory}\ If\ \isacommand{imports}\ FOL\isanewline
+\isacommand{begin}\isanewline
+\isacommand{constdefs}\isanewline
+\ \ if\ ::\ "[o,o,o]=>o"\isanewline
+\ \ \ "if(P,Q,R)\ ==\ P\&Q\ |\ \isachartilde P\&R"
+\end{isabelle}
+We create the file \texttt{If.thy} containing these declarations.  (This file
+is in directory \texttt{FOL/ex} in the Isabelle distribution.)  Typing
+\begin{isabelle}
+use_thy "If";  
+\end{isabelle}
+loads that theory and sets it to be the current context.
+
+
+\subsection{Deriving the introduction rule}
+
+The derivations of the introduction and elimination rules demonstrate the
+methods for rewriting with definitions.  Classical reasoning is required,
+so we use \isa{blast}.
+
+The introduction rule, given the premises $P\Imp Q$ and $\neg P\Imp R$,
+concludes $if(P,Q,R)$.  We propose this lemma and immediately simplify
+using \isa{if\_def} to expand the definition of \isa{if} in the
+subgoal.
+\begin{isabelle}
+\isacommand{lemma}\ ifI: "[|\ P\ ==>\ Q;\ \isachartilde P\ ==>\ R\
+|]\ ==>\ if(P,Q,R)"\isanewline
+\ 1.\ \isasymlbrakk P\ \isasymLongrightarrow \ Q;\ \isasymnot \ P\ \isasymLongrightarrow \ R\isasymrbrakk \ \isasymLongrightarrow \ if(P,\ Q,\ R)
+\isanewline
+\isacommand{apply}\ (simp\ add:\ if\_def)\isanewline
+\ 1.\ \isasymlbrakk P\ \isasymLongrightarrow \ Q;\ \isasymnot \ P\ \isasymLongrightarrow \ R\isasymrbrakk \ \isasymLongrightarrow \ P\ \isasymand \ Q\ \isasymor \ \isasymnot \ P\ \isasymand \
+R
+\end{isabelle}
+The rule's premises, although expressed using meta-level implication
+(\isa{\isasymLongrightarrow}), are passed as ordinary implications to
+\methdx{blast}.  
+\begin{isabelle}
+\isacommand{apply}\ blast\isanewline
+No\ subgoals!\isanewline
+\isacommand{done}
+\end{isabelle}
+
+
+\subsection{Deriving the elimination rule}
+The elimination rule has three premises, two of which are themselves rules.
+The conclusion is simply $S$.
+\begin{isabelle}
+\isacommand{lemma}\ ifE:\isanewline
+\ \ \ "[|\ if(P,Q,R);\ \ [|P;\ Q|]\ ==>\ S;\ [|\isachartilde P;\ R|]\ ==>\ S\ |]\ ==>\ S"\isanewline
+\ 1.\ \isasymlbrakk if(P,\ Q,\ R);\ \isasymlbrakk P;\ Q\isasymrbrakk \ \isasymLongrightarrow \ S;\ \isasymlbrakk \isasymnot \ P;\ R\isasymrbrakk \ \isasymLongrightarrow \ S\isasymrbrakk \ \isasymLongrightarrow \ S%
+\isanewline
+\isacommand{apply}\ (simp\ add:\ if\_def)\isanewline
+\ 1.\ \isasymlbrakk P\ \isasymand \ Q\ \isasymor \ \isasymnot \ P\ \isasymand \ R;\ \isasymlbrakk P;\ Q\isasymrbrakk \ \isasymLongrightarrow \ S;\ \isasymlbrakk \isasymnot \ P;\ R\isasymrbrakk \ \isasymLongrightarrow \ S\isasymrbrakk \ \isasymLongrightarrow \ S%
+\end{isabelle}
+The proof script is the same as before: \isa{simp} followed by
+\isa{blast}:
+\begin{isabelle}
+\isacommand{apply}\ blast\isanewline
+No\ subgoals!\isanewline
+\isacommand{done}
+\end{isabelle}
+
+
+\subsection{Using the derived rules}
+Our new derived rules, \tdx{ifI} and~\tdx{ifE}, permit natural
+proofs of theorems such as the following:
+\begin{eqnarray*}
+    if(P, if(Q,A,B), if(Q,C,D)) & \bimp & if(Q,if(P,A,C),if(P,B,D)) \\
+    if(if(P,Q,R), A, B)         & \bimp & if(P,if(Q,A,B),if(R,A,B))
+\end{eqnarray*}
+Proofs also require the classical reasoning rules and the $\bimp$
+introduction rule (called~\tdx{iffI}: do not confuse it with~\isa{ifI}). 
+
+To display the $if$-rules in action, let us analyse a proof step by step.
+\begin{isabelle}
+\isacommand{lemma}\ if\_commute:\isanewline
+\ \ \ \ \ "if(P,\ if(Q,A,B),\
+if(Q,C,D))\ <->\ if(Q,\ if(P,A,C),\ if(P,B,D))"\isanewline
+\isacommand{apply}\ (rule\ iffI)\isanewline
+\ 1.\ if(P,\ if(Q,\ A,\ B),\ if(Q,\ C,\ D))\ \isasymLongrightarrow \isanewline
+\isaindent{\ 1.\ }if(Q,\ if(P,\ A,\ C),\ if(P,\ B,\ D))\isanewline
+\ 2.\ if(Q,\ if(P,\ A,\ C),\ if(P,\ B,\ D))\ \isasymLongrightarrow \isanewline
+\isaindent{\ 2.\ }if(P,\ if(Q,\ A,\ B),\ if(Q,\ C,\ D))
+\end{isabelle}
+The $if$-elimination rule can be applied twice in succession.
+\begin{isabelle}
+\isacommand{apply}\ (erule\ ifE)\isanewline
+\ 1.\ \isasymlbrakk P;\ if(Q,\ A,\ B)\isasymrbrakk \ \isasymLongrightarrow \ if(Q,\ if(P,\ A,\ C),\ if(P,\ B,\ D))\isanewline
+\ 2.\ \isasymlbrakk \isasymnot \ P;\ if(Q,\ C,\ D)\isasymrbrakk \ \isasymLongrightarrow \ if(Q,\ if(P,\ A,\ C),\ if(P,\ B,\ D))\isanewline
+\ 3.\ if(Q,\ if(P,\ A,\ C),\ if(P,\ B,\ D))\ \isasymLongrightarrow \isanewline
+\isaindent{\ 3.\ }if(P,\ if(Q,\ A,\ B),\ if(Q,\ C,\ D))
+\isanewline
+\isacommand{apply}\ (erule\ ifE)\isanewline
+\ 1.\ \isasymlbrakk P;\ Q;\ A\isasymrbrakk \ \isasymLongrightarrow \ if(Q,\ if(P,\ A,\ C),\ if(P,\ B,\ D))\isanewline
+\ 2.\ \isasymlbrakk P;\ \isasymnot \ Q;\ B\isasymrbrakk \ \isasymLongrightarrow \ if(Q,\ if(P,\ A,\ C),\ if(P,\ B,\ D))\isanewline
+\ 3.\ \isasymlbrakk \isasymnot \ P;\ if(Q,\ C,\ D)\isasymrbrakk \ \isasymLongrightarrow \ if(Q,\ if(P,\ A,\ C),\ if(P,\ B,\ D))\isanewline
+\ 4.\ if(Q,\ if(P,\ A,\ C),\ if(P,\ B,\ D))\ \isasymLongrightarrow \isanewline
+\isaindent{\ 4.\ }if(P,\ if(Q,\ A,\ B),\ if(Q,\ C,\ D))
+\end{isabelle}
+%
+In the first two subgoals, all assumptions have been reduced to atoms.  Now
+$if$-introduction can be applied.  Observe how the $if$-rules break down
+occurrences of $if$ when they become the outermost connective.
+\begin{isabelle}
+\isacommand{apply}\ (rule\ ifI)\isanewline
+\ 1.\ \isasymlbrakk P;\ Q;\ A;\ Q\isasymrbrakk \ \isasymLongrightarrow \ if(P,\ A,\ C)\isanewline
+\ 2.\ \isasymlbrakk P;\ Q;\ A;\ \isasymnot \ Q\isasymrbrakk \ \isasymLongrightarrow \ if(P,\ B,\ D)\isanewline
+\ 3.\ \isasymlbrakk P;\ \isasymnot \ Q;\ B\isasymrbrakk \ \isasymLongrightarrow \ if(Q,\ if(P,\ A,\ C),\ if(P,\ B,\ D))\isanewline
+\ 4.\ \isasymlbrakk \isasymnot \ P;\ if(Q,\ C,\ D)\isasymrbrakk \ \isasymLongrightarrow \ if(Q,\ if(P,\ A,\ C),\ if(P,\ B,\ D))\isanewline
+\ 5.\ if(Q,\ if(P,\ A,\ C),\ if(P,\ B,\ D))\ \isasymLongrightarrow \isanewline
+\isaindent{\ 5.\ }if(P,\ if(Q,\ A,\ B),\ if(Q,\ C,\ D))
+\isanewline
+\isacommand{apply}\ (rule\ ifI)\isanewline
+\ 1.\ \isasymlbrakk P;\ Q;\ A;\ Q;\ P\isasymrbrakk \ \isasymLongrightarrow \ A\isanewline
+\ 2.\ \isasymlbrakk P;\ Q;\ A;\ Q;\ \isasymnot \ P\isasymrbrakk \ \isasymLongrightarrow \ C\isanewline
+\ 3.\ \isasymlbrakk P;\ Q;\ A;\ \isasymnot \ Q\isasymrbrakk \ \isasymLongrightarrow \ if(P,\ B,\ D)\isanewline
+\ 4.\ \isasymlbrakk P;\ \isasymnot \ Q;\ B\isasymrbrakk \ \isasymLongrightarrow \ if(Q,\ if(P,\ A,\ C),\ if(P,\ B,\ D))\isanewline
+\ 5.\ \isasymlbrakk \isasymnot \ P;\ if(Q,\ C,\ D)\isasymrbrakk \ \isasymLongrightarrow \ if(Q,\ if(P,\ A,\ C),\ if(P,\ B,\ D))\isanewline
+\ 6.\ if(Q,\ if(P,\ A,\ C),\ if(P,\ B,\ D))\ \isasymLongrightarrow \isanewline
+\isaindent{\ 6.\ }if(P,\ if(Q,\ A,\ B),\ if(Q,\ C,\ D))
+\end{isabelle}
+Where do we stand?  The first subgoal holds by assumption; the second and
+third, by contradiction.  This is getting tedious.  We could use the classical
+reasoner, but first let us extend the default claset with the derived rules
+for~$if$.
+\begin{isabelle}
+\isacommand{declare}\ ifI\ [intro!]\isanewline
+\isacommand{declare}\ ifE\ [elim!]
+\end{isabelle}
+With these declarations, we could have proved this theorem with a single
+call to \isa{blast}.  Here is another example:
+\begin{isabelle}
+\isacommand{lemma}\ "if(if(P,Q,R),\ A,\ B)\ <->\ if(P,\ if(Q,A,B),\ if(R,A,B))"\isanewline
+\ 1.\ if(if(P,\ Q,\ R),\ A,\ B)\ \isasymlongleftrightarrow \ if(P,\ if(Q,\ A,\ B),\ if(R,\ A,\ B))
+\isanewline
+\isacommand{by}\ blast
+\end{isabelle}
+
+
+\subsection{Derived rules versus definitions}
+Dispensing with the derived rules, we can treat $if$ as an
+abbreviation, and let the \methdx{blast} method prove the expanded formula.  Let
+us redo the previous proof:
+\begin{isabelle}
+\isacommand{lemma}\ "if(if(P,Q,R),\ A,\ B)\ <->\ if(P,\ if(Q,A,B),\ if(R,A,B))"\isanewline
+\ 1.\ if(if(P,\ Q,\ R),\ A,\ B)\ \isasymlongleftrightarrow \ if(P,\ if(Q,\ A,\ B),\ if(R,\ A,\ B))
+\end{isabelle}
+This time, we simply unfold using the definition of $if$:
+\begin{isabelle}
+\isacommand{apply}\ (simp\ add:\ if\_def)\isanewline
+\ 1.\ (P\ \isasymand \ Q\ \isasymor \ \isasymnot \ P\ \isasymand \ R)\ \isasymand \ A\ \isasymor \ (\isasymnot \ P\ \isasymor \ \isasymnot \ Q)\ \isasymand \ (P\ \isasymor \ \isasymnot \ R)\ \isasymand \ B\ \isasymlongleftrightarrow \isanewline
+\isaindent{\ 1.\ }P\ \isasymand \ (Q\ \isasymand \ A\ \isasymor \ \isasymnot \ Q\ \isasymand \ B)\ \isasymor \ \isasymnot \ P\ \isasymand \ (R\ \isasymand \ A\ \isasymor \ \isasymnot \ R\ \isasymand \ B)
+\end{isabelle}
+We are left with a subgoal in pure first-order logic, and it falls to
+\isa{blast}:
+\begin{isabelle}
+\isacommand{apply}\ blast\isanewline
+No\ subgoals!
+\end{isabelle}
+Expanding definitions reduces the extended logic to the base logic.  This
+approach has its merits, but it can be slow.  In these examples, proofs
+using the derived rules for~\isa{if} run about six
+times faster  than proofs using just the rules of first-order logic.
+
+Expanding definitions can also make it harder to diagnose errors. 
+Suppose we are having difficulties in proving some goal.  If by expanding
+definitions we have made it unreadable, then we have little hope of
+diagnosing the problem.
+
+Attempts at program verification often yield invalid assertions.
+Let us try to prove one:
+\begin{isabelle}
+\isacommand{lemma}\ "if(if(P,Q,R),\ A,\ B)\ <->\ if(P,\ if(Q,A,B),\ if(R,B,A))"\isanewline
+\ 1.\ if(if(P,\ Q,\ R),\ A,\ B)\ \isasymlongleftrightarrow \ if(P,\ if(Q,\ A,\ B),\ if(R,\ B,\
+A))
+\end{isabelle}
+Calling \isa{blast} yields an uninformative failure message. We can
+get a closer look at the situation by applying \methdx{auto}.
+\begin{isabelle}
+\isacommand{apply}\ auto\isanewline
+\ 1.\ \isasymlbrakk A;\ \isasymnot \ P;\ R\isasymrbrakk \ \isasymLongrightarrow \ B\isanewline
+\ 2.\ \isasymlbrakk B;\ \isasymnot \ P;\ \isasymnot \ R\isasymrbrakk \ \isasymLongrightarrow \ A\isanewline
+\ 3.\ \isasymlbrakk B;\ \isasymnot \ P;\ R\isasymrbrakk \ \isasymLongrightarrow \ A\isanewline
+\ 4.\ \isasymlbrakk \isasymnot \ R;\ A;\ \isasymnot \ B;\ \isasymnot \ P\isasymrbrakk \ \isasymLongrightarrow \
+False
+\end{isabelle}
+Subgoal~1 is unprovable and yields a countermodel: $P$ and~$B$ are false
+while~$R$ and~$A$ are true.  This truth assignment reduces the main goal to
+$true\bimp false$, which is of course invalid.
+
+We can repeat this analysis by expanding definitions, using just the rules of
+first-order logic:
+\begin{isabelle}
+\isacommand{lemma}\ "if(if(P,Q,R),\ A,\ B)\ <->\ if(P,\ if(Q,A,B),\ if(R,B,A))"\isanewline
+\ 1.\ if(if(P,\ Q,\ R),\ A,\ B)\ \isasymlongleftrightarrow \ if(P,\ if(Q,\ A,\ B),\ if(R,\ B,\
+A))
+\isanewline
+\isacommand{apply}\ (simp\ add:\ if\_def)\isanewline
+\ 1.\ (P\ \isasymand \ Q\ \isasymor \ \isasymnot \ P\ \isasymand \ R)\ \isasymand \ A\ \isasymor \ (\isasymnot \ P\ \isasymor \ \isasymnot \ Q)\ \isasymand \ (P\ \isasymor \ \isasymnot \ R)\ \isasymand \ B\ \isasymlongleftrightarrow \isanewline
+\isaindent{\ 1.\ }P\ \isasymand \ (Q\ \isasymand \ A\ \isasymor \ \isasymnot \ Q\ \isasymand \ B)\ \isasymor \ \isasymnot \ P\ \isasymand \ (R\ \isasymand \ B\ \isasymor \ \isasymnot \ R\ \isasymand \ A)
+\end{isabelle}
+Again \isa{blast} would fail, so we try \methdx{auto}:
+\begin{isabelle}
+\isacommand{apply}\ (auto)\ \isanewline
+\ 1.\ \isasymlbrakk A;\ \isasymnot \ P;\ R\isasymrbrakk \ \isasymLongrightarrow \ B\isanewline
+\ 2.\ \isasymlbrakk A;\ \isasymnot \ P;\ R;\ \isasymnot \ B\isasymrbrakk \ \isasymLongrightarrow \ Q\isanewline
+\ 3.\ \isasymlbrakk B;\ \isasymnot \ R;\ \isasymnot \ P;\ \isasymnot \ A\isasymrbrakk \ \isasymLongrightarrow \ False\isanewline
+\ 4.\ \isasymlbrakk B;\ \isasymnot \ P;\ \isasymnot \ A;\ \isasymnot \ R;\ Q\isasymrbrakk \ \isasymLongrightarrow \ False\isanewline
+\ 5.\ \isasymlbrakk B;\ \isasymnot \ Q;\ \isasymnot \ R;\ \isasymnot \ P;\ \isasymnot \ A\isasymrbrakk \ \isasymLongrightarrow \ False\isanewline
+\ 6.\ \isasymlbrakk B;\ \isasymnot \ A;\ \isasymnot \ P;\ R\isasymrbrakk \ \isasymLongrightarrow \ False\isanewline
+\ 7.\ \isasymlbrakk \isasymnot \ P;\ A;\ \isasymnot \ B;\ \isasymnot \ R\isasymrbrakk \ \isasymLongrightarrow \ False\isanewline
+\ 8.\ \isasymlbrakk \isasymnot \ P;\ A;\ \isasymnot \ B;\ \isasymnot \ R\isasymrbrakk \ \isasymLongrightarrow \ Q%
+\end{isabelle}
+Subgoal~1 yields the same countermodel as before.  But each proof step has
+taken six times as long, and the final result contains twice as many subgoals.
+
+Expanding your definitions usually makes proofs more difficult.  This is
+why the classical prover has been designed to accept derived rules.
+
+\index{first-order logic|)}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/doc-src/ZF/document/ZF.tex	Mon Aug 27 21:46:00 2012 +0200
@@ -0,0 +1,2717 @@
+\chapter{Zermelo-Fraenkel Set Theory}
+\index{set theory|(}
+
+The theory~\thydx{ZF} implements Zermelo-Fraenkel set
+theory~\cite{halmos60,suppes72} as an extension of~\texttt{FOL}, classical
+first-order logic.  The theory includes a collection of derived natural
+deduction rules, for use with Isabelle's classical reasoner.  Some
+of it is based on the work of No\"el~\cite{noel}.
+
+A tremendous amount of set theory has been formally developed, including the
+basic properties of relations, functions, ordinals and cardinals.  Significant
+results have been proved, such as the Schr\"oder-Bernstein Theorem, the
+Wellordering Theorem and a version of Ramsey's Theorem.  \texttt{ZF} provides
+both the integers and the natural numbers.  General methods have been
+developed for solving recursion equations over monotonic functors; these have
+been applied to yield constructions of lists, trees, infinite lists, etc.
+
+\texttt{ZF} has a flexible package for handling inductive definitions,
+such as inference systems, and datatype definitions, such as lists and
+trees.  Moreover it handles coinductive definitions, such as
+bisimulation relations, and codatatype definitions, such as streams.  It
+provides a streamlined syntax for defining primitive recursive functions over
+datatypes. 
+
+Published articles~\cite{paulson-set-I,paulson-set-II} describe \texttt{ZF}
+less formally than this chapter.  Isabelle employs a novel treatment of
+non-well-founded data structures within the standard {\sc zf} axioms, including
+the Axiom of Foundation~\cite{paulson-mscs}.
+
+
+\section{Which version of axiomatic set theory?}
+The two main axiom systems for set theory are Bernays-G\"odel~({\sc bg})
+and Zermelo-Fraenkel~({\sc zf}).  Resolution theorem provers can use {\sc
+  bg} because it is finite~\cite{boyer86,quaife92}.  {\sc zf} does not
+have a finite axiom system because of its Axiom Scheme of Replacement.
+This makes it awkward to use with many theorem provers, since instances
+of the axiom scheme have to be invoked explicitly.  Since Isabelle has no
+difficulty with axiom schemes, we may adopt either axiom system.
+
+These two theories differ in their treatment of {\bf classes}, which are
+collections that are `too big' to be sets.  The class of all sets,~$V$,
+cannot be a set without admitting Russell's Paradox.  In {\sc bg}, both
+classes and sets are individuals; $x\in V$ expresses that $x$ is a set.  In
+{\sc zf}, all variables denote sets; classes are identified with unary
+predicates.  The two systems define essentially the same sets and classes,
+with similar properties.  In particular, a class cannot belong to another
+class (let alone a set).
+
+Modern set theorists tend to prefer {\sc zf} because they are mainly concerned
+with sets, rather than classes.  {\sc bg} requires tiresome proofs that various
+collections are sets; for instance, showing $x\in\{x\}$ requires showing that
+$x$ is a set.
+
+
+\begin{figure} \small
+\begin{center}
+\begin{tabular}{rrr} 
+  \it name      &\it meta-type  & \it description \\ 
+  \cdx{Let}     & $[\alpha,\alpha\To\beta]\To\beta$ & let binder\\
+  \cdx{0}       & $i$           & empty set\\
+  \cdx{cons}    & $[i,i]\To i$  & finite set constructor\\
+  \cdx{Upair}   & $[i,i]\To i$  & unordered pairing\\
+  \cdx{Pair}    & $[i,i]\To i$  & ordered pairing\\
+  \cdx{Inf}     & $i$   & infinite set\\
+  \cdx{Pow}     & $i\To i$      & powerset\\
+  \cdx{Union} \cdx{Inter} & $i\To i$    & set union/intersection \\
+  \cdx{split}   & $[[i,i]\To i, i] \To i$ & generalized projection\\
+  \cdx{fst} \cdx{snd}   & $i\To i$      & projections\\
+  \cdx{converse}& $i\To i$      & converse of a relation\\
+  \cdx{succ}    & $i\To i$      & successor\\
+  \cdx{Collect} & $[i,i\To o]\To i$     & separation\\
+  \cdx{Replace} & $[i, [i,i]\To o] \To i$       & replacement\\
+  \cdx{PrimReplace} & $[i, [i,i]\To o] \To i$   & primitive replacement\\
+  \cdx{RepFun}  & $[i, i\To i] \To i$   & functional replacement\\
+  \cdx{Pi} \cdx{Sigma}  & $[i,i\To i]\To i$     & general product/sum\\
+  \cdx{domain}  & $i\To i$      & domain of a relation\\
+  \cdx{range}   & $i\To i$      & range of a relation\\
+  \cdx{field}   & $i\To i$      & field of a relation\\
+  \cdx{Lambda}  & $[i, i\To i]\To i$    & $\lambda$-abstraction\\
+  \cdx{restrict}& $[i, i] \To i$        & restriction of a function\\
+  \cdx{The}     & $[i\To o]\To i$       & definite description\\
+  \cdx{if}      & $[o,i,i]\To i$        & conditional\\
+  \cdx{Ball} \cdx{Bex}  & $[i, i\To o]\To o$    & bounded quantifiers
+\end{tabular}
+\end{center}
+\subcaption{Constants}
+
+\begin{center}
+\index{*"`"` symbol}
+\index{*"-"`"` symbol}
+\index{*"` symbol}\index{function applications}
+\index{*"- symbol}
+\index{*": symbol}
+\index{*"<"= symbol}
+\begin{tabular}{rrrr} 
+  \it symbol  & \it meta-type & \it priority & \it description \\ 
+  \tt ``        & $[i,i]\To i$  &  Left 90      & image \\
+  \tt -``       & $[i,i]\To i$  &  Left 90      & inverse image \\
+  \tt `         & $[i,i]\To i$  &  Left 90      & application \\
+  \sdx{Int}     & $[i,i]\To i$  &  Left 70      & intersection ($\int$) \\
+  \sdx{Un}      & $[i,i]\To i$  &  Left 65      & union ($\un$) \\
+  \tt -         & $[i,i]\To i$  &  Left 65      & set difference ($-$) \\[1ex]
+  \tt:          & $[i,i]\To o$  &  Left 50      & membership ($\in$) \\
+  \tt <=        & $[i,i]\To o$  &  Left 50      & subset ($\subseteq$) 
+\end{tabular}
+\end{center}
+\subcaption{Infixes}
+\caption{Constants of ZF} \label{zf-constants}
+\end{figure} 
+
+
+\section{The syntax of set theory}
+The language of set theory, as studied by logicians, has no constants.  The
+traditional axioms merely assert the existence of empty sets, unions,
+powersets, etc.; this would be intolerable for practical reasoning.  The
+Isabelle theory declares constants for primitive sets.  It also extends
+\texttt{FOL} with additional syntax for finite sets, ordered pairs,
+comprehension, general union/intersection, general sums/products, and
+bounded quantifiers.  In most other respects, Isabelle implements precisely
+Zermelo-Fraenkel set theory.
+
+Figure~\ref{zf-constants} lists the constants and infixes of~ZF, while
+Figure~\ref{zf-trans} presents the syntax translations.  Finally,
+Figure~\ref{zf-syntax} presents the full grammar for set theory, including the
+constructs of FOL.
+
+Local abbreviations can be introduced by a \isa{let} construct whose
+syntax appears in Fig.\ts\ref{zf-syntax}.  Internally it is translated into
+the constant~\cdx{Let}.  It can be expanded by rewriting with its
+definition, \tdx{Let_def}.
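+For instance, the hypothetical abbreviation \isa{let $x$ = $A$ Un $B$ in $x$ Int $x$}
+expands, after rewriting with \tdx{Let_def}, to \isa{($A$ Un $B$) Int ($A$ Un $B$)};
+the \isa{let} merely avoids writing the union twice.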
+
+Apart from \isa{let}, set theory does not use polymorphism.  All terms in
+ZF have type~\tydx{i}, which is the type of individuals and has
+class~\cldx{term}.  The type of first-order formulae, remember, 
+is~\tydx{o}.
+
+Infix operators include binary union and intersection ($A\un B$ and
+$A\int B$), set difference ($A-B$), and the subset and membership
+relations.  Note that $a$\verb|~:|$b$ is translated to $\lnot(a\in b)$,
+which is equivalent to  $a\notin b$.  The
+union and intersection operators ($\bigcup A$ and $\bigcap A$) form the
+union or intersection of a set of sets; $\bigcup A$ means the same as
+$\bigcup@{x\in A}x$.  Of these operators, only $\bigcup A$ is primitive.
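+For instance, $\bigcup\{\{a\},\{a,b\}\}$ is $\{a,b\}$, while
+$\bigcap\{\{a\},\{a,b\}\}$ is $\{a\}$.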
+
+The constant \cdx{Upair} constructs unordered pairs; thus \isa{Upair($A$,$B$)} denotes the set~$\{A,B\}$ and
+\isa{Upair($A$,$A$)} denotes the singleton~$\{A\}$.  General union is
+used to define binary union.  The Isabelle version goes on to define
+the constant
+\cdx{cons}:
+\begin{eqnarray*}
+   A\cup B              & \equiv &       \bigcup(\isa{Upair}(A,B)) \\
+   \isa{cons}(a,B)      & \equiv &        \isa{Upair}(a,a) \un B
+\end{eqnarray*}
+The $\{a@1, \ldots\}$ notation abbreviates finite sets constructed in the
+obvious manner using~\isa{cons} and~$\emptyset$ (the empty set):
+\begin{eqnarray*}
+ \{a,b,c\} & \equiv & \isa{cons}(a,\isa{cons}(b,\isa{cons}(c,\emptyset)))
+\end{eqnarray*}
+
+The constant \cdx{Pair} constructs ordered pairs, as in \isa{Pair($a$,$b$)}.  Ordered pairs may also be written within angle brackets,
+as {\tt<$a$,$b$>}.  The $n$-tuple {\tt<$a@1$,\ldots,$a@{n-1}$,$a@n$>}
+abbreviates the nest of pairs\par\nobreak
+\centerline{\isa{Pair($a@1$,\ldots,Pair($a@{n-1}$,$a@n$)\ldots).}}
+
+In ZF, a function is a set of pairs.  A ZF function~$f$ is simply an
+individual as far as Isabelle is concerned: its Isabelle type is~$i$, not, say,
+$i\To i$.  The infix operator~{\tt`} denotes the application of a function set
+to its argument; we must write~$f{\tt`}x$, not~$f(x)$.  The syntax for image
+is~$f{\tt``}A$ and that for inverse image is~$f{\tt-``}A$.
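+For instance, if $f$ happens to be the set of pairs $\{\pair{0,a},\pair{1,b}\}$
+then $f{\tt`}0$ denotes~$a$ and $f{\tt``}\{0,1\}$ denotes~$\{a,b\}$.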
+
+
+\begin{figure} 
+\index{lambda abs@$\lambda$-abstractions}
+\index{*"-"> symbol}
+\index{*"* symbol}
+\begin{center} \footnotesize\tt\frenchspacing
+\begin{tabular}{rrr} 
+  \it external          & \it internal  & \it description \\ 
+  $a$ \ttilde: $b$      & \ttilde($a$ : $b$)    & \rm negated membership\\
+  \ttlbrace$a@1$, $\ldots$, $a@n$\ttrbrace  &  cons($a@1$,$\ldots$,cons($a@n$,0)) &
+        \rm finite set \\
+  <$a@1$, $\ldots$, $a@{n-1}$, $a@n$> & 
+        Pair($a@1$,\ldots,Pair($a@{n-1}$,$a@n$)\ldots) &
+        \rm ordered $n$-tuple \\
+  \ttlbrace$x$:$A . P[x]$\ttrbrace    &  Collect($A$,$\lambda x. P[x]$) &
+        \rm separation \\
+  \ttlbrace$y . x$:$A$, $Q[x,y]$\ttrbrace  &  Replace($A$,$\lambda x\,y. Q[x,y]$) &
+        \rm replacement \\
+  \ttlbrace$b[x] . x$:$A$\ttrbrace  &  RepFun($A$,$\lambda x. b[x]$) &
+        \rm functional replacement \\
+  \sdx{INT} $x$:$A . B[x]$      & Inter(\ttlbrace$B[x] . x$:$A$\ttrbrace) &
+        \rm general intersection \\
+  \sdx{UN}  $x$:$A . B[x]$      & Union(\ttlbrace$B[x] . x$:$A$\ttrbrace) &
+        \rm general union \\
+  \sdx{PROD} $x$:$A . B[x]$     & Pi($A$,$\lambda x. B[x]$) & 
+        \rm general product \\
+  \sdx{SUM}  $x$:$A . B[x]$     & Sigma($A$,$\lambda x. B[x]$) & 
+        \rm general sum \\
+  $A$ -> $B$            & Pi($A$,$\lambda x. B$) & 
+        \rm function space \\
+  $A$ * $B$             & Sigma($A$,$\lambda x. B$) & 
+        \rm binary product \\
+  \sdx{THE}  $x . P[x]$ & The($\lambda x. P[x]$) & 
+        \rm definite description \\
+  \sdx{lam}  $x$:$A . b[x]$     & Lambda($A$,$\lambda x. b[x]$) & 
+        \rm $\lambda$-abstraction\\[1ex]
+  \sdx{ALL} $x$:$A . P[x]$      & Ball($A$,$\lambda x. P[x]$) & 
+        \rm bounded $\forall$ \\
+  \sdx{EX}  $x$:$A . P[x]$      & Bex($A$,$\lambda x. P[x]$) & 
+        \rm bounded $\exists$
+\end{tabular}
+\end{center}
+\caption{Translations for ZF} \label{zf-trans}
+\end{figure} 
+
+
+\begin{figure} 
+\index{*let symbol}
+\index{*in symbol}
+\dquotes
+\[\begin{array}{rcl}
+    term & = & \hbox{expression of type~$i$} \\
+         & | & "let"~id~"="~term";"\dots";"~id~"="~term~"in"~term \\
+         & | & "if"~term~"then"~term~"else"~term \\
+         & | & "{\ttlbrace} " term\; ("," term)^* " {\ttrbrace}" \\
+         & | & "< "  term\; ("," term)^* " >"  \\
+         & | & "{\ttlbrace} " id ":" term " . " formula " {\ttrbrace}" \\
+         & | & "{\ttlbrace} " id " . " id ":" term ", " formula " {\ttrbrace}" \\
+         & | & "{\ttlbrace} " term " . " id ":" term " {\ttrbrace}" \\
+         & | & term " `` " term \\
+         & | & term " -`` " term \\
+         & | & term " ` " term \\
+         & | & term " * " term \\
+         & | & term " \isasyminter " term \\
+         & | & term " \isasymunion " term \\
+         & | & term " - " term \\
+         & | & term " -> " term \\
+         & | & "THE~~"  id  " . " formula\\
+         & | & "lam~~"  id ":" term " . " term \\
+         & | & "INT~~"  id ":" term " . " term \\
+         & | & "UN~~~"  id ":" term " . " term \\
+         & | & "PROD~"  id ":" term " . " term \\
+         & | & "SUM~~"  id ":" term " . " term \\[2ex]
+ formula & = & \hbox{expression of type~$o$} \\
+         & | & term " : " term \\
+         & | & term " \ttilde: " term \\
+         & | & term " <= " term \\
+         & | & term " = " term \\
+         & | & term " \ttilde= " term \\
+         & | & "\ttilde\ " formula \\
+         & | & formula " \& " formula \\
+         & | & formula " | " formula \\
+         & | & formula " --> " formula \\
+         & | & formula " <-> " formula \\
+         & | & "ALL " id ":" term " . " formula \\
+         & | & "EX~~" id ":" term " . " formula \\
+         & | & "ALL~" id~id^* " . " formula \\
+         & | & "EX~~" id~id^* " . " formula \\
+         & | & "EX!~" id~id^* " . " formula
+  \end{array}
+\]
+\caption{Full grammar for ZF} \label{zf-syntax}
+\end{figure} 
+
+
+\section{Binding operators}
+The constant \cdx{Collect} constructs sets by the principle of {\bf
+  separation}.  The syntax for separation is
+\hbox{\tt\ttlbrace$x$:$A$.\ $P[x]$\ttrbrace}, where $P[x]$ is a formula
+that may contain free occurrences of~$x$.  It abbreviates the set \isa{Collect($A$,$\lambda x. P[x]$)}, which consists of all $x\in A$ that
+satisfy~$P[x]$.  Note that \isa{Collect} is an unfortunate choice of
+name: some set theories adopt a set-formation principle, related to
+replacement, called collection.
+
+The constant \cdx{Replace} constructs sets by the principle of {\bf
+  replacement}.  The syntax
+\hbox{\tt\ttlbrace$y$.\ $x$:$A$,$Q[x,y]$\ttrbrace} denotes the set 
+\isa{Replace($A$,$\lambda x\,y. Q[x,y]$)}, which consists of all~$y$ such
+that there exists $x\in A$ satisfying~$Q[x,y]$.  The Replacement Axiom
+has the condition that $Q$ must be single-valued over~$A$: for
+all~$x\in A$ there exists at most one $y$ satisfying~$Q[x,y]$.  A
+single-valued binary predicate is also called a {\bf class function}.
+
+The constant \cdx{RepFun} expresses a special case of replacement,
+where $Q[x,y]$ has the form $y=b[x]$.  Such a $Q$ is trivially
+single-valued, since it is just the graph of the meta-level
+function~$\lambda x. b[x]$.  The resulting set consists of all $b[x]$
+for~$x\in A$.  This is analogous to the \ML{} functional \isa{map},
+since it applies a function to every element of a set.  The syntax is
+\isa{\ttlbrace$b[x]$.\ $x$:$A$\ttrbrace}, which expands to 
+\isa{RepFun($A$,$\lambda x. b[x]$)}.
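+
+For instance, using the introduction rule \isa{RepFunI} shown later in
+Fig.\ts\ref{zf-lemmas2}, membership in such a set follows by a single rule
+application; the following fragment is merely an illustrative sketch:
+\begin{ttbox}\isastyleminor
+lemma "a \isasymin A ==> succ(a) \isasymin {\ttlbrace}succ(x). x \isasymin A{\ttrbrace}"
+by (rule RepFunI, assumption)
+\end{ttbox}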
+
+\index{*INT symbol}\index{*UN symbol} 
+General unions and intersections of indexed
+families of sets, namely $\bigcup@{x\in A}B[x]$ and $\bigcap@{x\in A}B[x]$,
+are written \isa{UN $x$:$A$.\ $B[x]$} and \isa{INT $x$:$A$.\ $B[x]$}.
+Their meaning is expressed using \isa{RepFun} as
+\[
+\bigcup(\{B[x]. x\in A\}) \qquad\hbox{and}\qquad 
+\bigcap(\{B[x]. x\in A\}). 
+\]
+General sums $\sum@{x\in A}B[x]$ and products $\prod@{x\in A}B[x]$ can be
+constructed in set theory, where $B[x]$ is a family of sets over~$A$.  They
+have as special cases $A\times B$ and $A\to B$, where $B$ is simply a set.
+This is similar to the situation in Constructive Type Theory (set theory
+has `dependent sets') and calls for similar syntactic conventions.  The
+constants~\cdx{Sigma} and~\cdx{Pi} construct general sums and
+products.  Instead of \isa{Sigma($A$,$B$)} and \isa{Pi($A$,$B$)} we may
+write 
+\isa{SUM $x$:$A$.\ $B[x]$} and \isa{PROD $x$:$A$.\ $B[x]$}.  
+\index{*SUM symbol}\index{*PROD symbol}%
+The special cases \hbox{\tt$A$*$B$} and \hbox{\tt$A$->$B$} abbreviate
+general sums and products over a constant family.\footnote{Unlike normal
+infix operators, {\tt*} and {\tt->} merely define abbreviations; there are
+no constants~\isa{op~*} and~\isa{op~->}.} Isabelle accepts these
+abbreviations in parsing and uses them whenever possible for printing.
+
+\index{*THE symbol} As mentioned above, whenever the axioms assert the
+existence and uniqueness of a set, Isabelle's set theory declares a constant
+for that set.  These constants can express the {\bf definite description}
+operator~$\iota x. P[x]$, which stands for the unique~$a$ satisfying~$P[a]$,
+if such exists.  Since all terms in ZF denote something, a description is
+always meaningful, but we do not know its value unless $P[x]$ defines it
+uniquely.  Using the constant~\cdx{The}, we may write descriptions as 
+\isa{The($\lambda x. P[x]$)} or use the syntax \isa{THE $x$.\ $P[x]$}.
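+
+As an illustrative sketch (using the rule \isa{the\_equality}, which appears
+later in Fig.\ts\ref{zf-the}), the description of the unique~$x$ equal
+to~$a$ is just~$a$:
+\begin{ttbox}\isastyleminor
+lemma "(THE x. x = a) = a"
+by (rule the_equality, rule refl, assumption)
+\end{ttbox}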
+
+\index{*lam symbol}
+Function sets may be written in $\lambda$-notation; $\lambda x\in A. b[x]$
+stands for the set of all pairs $\pair{x,b[x]}$ for $x\in A$.  In order for
+this to be a set, the function's domain~$A$ must be given.  Using the
+constant~\cdx{Lambda}, we may express function sets as \isa{Lambda($A$,$\lambda x. b[x]$)} or use the syntax \isa{lam $x$:$A$.\ $b[x]$}.
+
+Isabelle's set theory defines two {\bf bounded quantifiers}:
+\begin{eqnarray*}
+   \forall x\in A. P[x] &\hbox{abbreviates}& \forall x. x\in A\imp P[x] \\
+   \exists x\in A. P[x] &\hbox{abbreviates}& \exists x. x\in A\conj P[x]
+\end{eqnarray*}
+The constants~\cdx{Ball} and~\cdx{Bex} are defined
+accordingly.  Instead of \isa{Ball($A$,$P$)} and \isa{Bex($A$,$P$)} we may
+write
+\isa{ALL $x$:$A$.\ $P[x]$} and \isa{EX $x$:$A$.\ $P[x]$}.
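+
+For example, unfolding the definition \isa{Ball\_def} (Fig.\ts\ref{zf-rules}
+below) confirms the first abbreviation; a minimal sketch:
+\begin{ttbox}\isastyleminor
+lemma "({\isasymforall}x\isasymin{}A. P(x)) <-> ({\isasymforall}x. x\isasymin{}A --> P(x))"
+by (simp add: Ball_def)
+\end{ttbox}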
+
+
+%%%% ZF.thy
+
+\begin{figure}
+\begin{alltt*}\isastyleminor
+\tdx{Let_def}:           Let(s, f) == f(s)
+
+\tdx{Ball_def}:          Ball(A,P) == {\isasymforall}x. x \isasymin A --> P(x)
+\tdx{Bex_def}:           Bex(A,P)  == {\isasymexists}x. x \isasymin A & P(x)
+
+\tdx{subset_def}:        A \isasymsubseteq B  == {\isasymforall}x \isasymin A. x \isasymin B
+\tdx{extension}:         A = B  <->  A \isasymsubseteq B & B \isasymsubseteq A
+
+\tdx{Union_iff}:         A \isasymin Union(C) <-> ({\isasymexists}B \isasymin C. A \isasymin B)
+\tdx{Pow_iff}:           A \isasymin Pow(B) <-> A \isasymsubseteq B
+\tdx{foundation}:        A=0 | ({\isasymexists}x \isasymin A. {\isasymforall}y \isasymin x. y \isasymnotin A)
+
+\tdx{replacement}:       ({\isasymforall}x \isasymin A. {\isasymforall}y z. P(x,y) & P(x,z) --> y=z) ==>
+                   b \isasymin PrimReplace(A,P) <-> ({\isasymexists}x{\isasymin}A. P(x,b))
+\subcaption{The Zermelo-Fraenkel Axioms}
+
+\tdx{Replace_def}: Replace(A,P) == 
+                   PrimReplace(A, \%x y. (\isasymexists!z. P(x,z)) & P(x,y))
+\tdx{RepFun_def}:  RepFun(A,f)  == {\ttlbrace}y . x \isasymin A, y=f(x)\ttrbrace
+\tdx{the_def}:     The(P)       == Union({\ttlbrace}y . x \isasymin {\ttlbrace}0{\ttrbrace}, P(y){\ttrbrace})
+\tdx{if_def}:      if(P,a,b)    == THE z. P & z=a | ~P & z=b
+\tdx{Collect_def}: Collect(A,P) == {\ttlbrace}y . x \isasymin A, x=y & P(x){\ttrbrace}
+\tdx{Upair_def}:   Upair(a,b)   == 
+               {\ttlbrace}y. x\isasymin{}Pow(Pow(0)), x=0 & y=a | x=Pow(0) & y=b{\ttrbrace}
+\subcaption{Consequences of replacement}
+
+\tdx{Inter_def}:   Inter(A) == {\ttlbrace}x \isasymin Union(A) . {\isasymforall}y \isasymin A. x \isasymin y{\ttrbrace}
+\tdx{Un_def}:      A \isasymunion B  == Union(Upair(A,B))
+\tdx{Int_def}:     A \isasyminter B  == Inter(Upair(A,B))
+\tdx{Diff_def}:    A - B    == {\ttlbrace}x \isasymin A . x \isasymnotin B{\ttrbrace}
+\subcaption{Union, intersection, difference}
+\end{alltt*}
+\caption{Rules and axioms of ZF} \label{zf-rules}
+\end{figure}
+
+
+\begin{figure}
+\begin{alltt*}\isastyleminor
+\tdx{cons_def}:    cons(a,A) == Upair(a,a) \isasymunion A
+\tdx{succ_def}:    succ(i) == cons(i,i)
+\tdx{infinity}:    0 \isasymin Inf & ({\isasymforall}y \isasymin Inf. succ(y) \isasymin Inf)
+\subcaption{Finite and infinite sets}
+
+\tdx{Pair_def}:      <a,b>      == {\ttlbrace}{\ttlbrace}a,a{\ttrbrace}, {\ttlbrace}a,b{\ttrbrace}{\ttrbrace}
+\tdx{split_def}:     split(c,p) == THE y. {\isasymexists}a b. p=<a,b> & y=c(a,b)
+\tdx{fst_def}:       fst(p)     == split(\%x y. x, p)
+\tdx{snd_def}:       snd(p)     == split(\%x y. y, p)
+\tdx{Sigma_def}:     Sigma(A,B) == {\isasymUnion}x \isasymin A. {\isasymUnion}y \isasymin B(x). {\ttlbrace}<x,y>{\ttrbrace}
+\subcaption{Ordered pairs and Cartesian products}
+
+\tdx{converse_def}: converse(r) == {\ttlbrace}z. w\isasymin{}r, {\isasymexists}x y. w=<x,y> & z=<y,x>{\ttrbrace}
+\tdx{domain_def}:   domain(r)   == {\ttlbrace}x. w \isasymin r, {\isasymexists}y. w=<x,y>{\ttrbrace}
+\tdx{range_def}:    range(r)    == domain(converse(r))
+\tdx{field_def}:    field(r)    == domain(r) \isasymunion range(r)
+\tdx{image_def}:    r `` A      == {\ttlbrace}y\isasymin{}range(r) . {\isasymexists}x \isasymin A. <x,y> \isasymin r{\ttrbrace}
+\tdx{vimage_def}:   r -`` A     == converse(r)``A
+\subcaption{Operations on relations}
+
+\tdx{lam_def}:   Lambda(A,b) == {\ttlbrace}<x,b(x)> . x \isasymin A{\ttrbrace}
+\tdx{apply_def}: f`a         == THE y. <a,y> \isasymin f
+\tdx{Pi_def}: Pi(A,B) == {\ttlbrace}f\isasymin{}Pow(Sigma(A,B)). {\isasymforall}x\isasymin{}A. \isasymexists!y. <x,y>\isasymin{}f{\ttrbrace}
+\tdx{restrict_def}:  restrict(f,A) == lam x \isasymin A. f`x
+\subcaption{Functions and general product}
+\end{alltt*}
+\caption{Further definitions of ZF} \label{zf-defs}
+\end{figure}
+
+
+
+\section{The Zermelo-Fraenkel axioms}
+The axioms appear in Fig.\ts \ref{zf-rules}.  They resemble those
+presented by Suppes~\cite{suppes72}.  Most of the theory consists of
+definitions.  In particular, bounded quantifiers and the subset relation
+appear in other axioms.  Object-level quantifiers and implications have
+been replaced by meta-level ones wherever possible, to simplify use of the
+axioms.  
+
+The traditional replacement axiom asserts
+\[ y \in \isa{PrimReplace}(A,P) \bimp (\exists x\in A. P(x,y)) \]
+subject to the condition that $P(x,y)$ is single-valued for all~$x\in A$.
+The Isabelle theory defines \cdx{Replace} to apply
+\cdx{PrimReplace} to the single-valued part of~$P$, namely
+\[ (\exists!z. P(x,z)) \conj P(x,y). \]
+Thus $y\in \isa{Replace}(A,P)$ if and only if there is some~$x$ such that
+$P(x,-)$ holds uniquely for~$y$.  Because the equivalence is unconditional,
+\isa{Replace} is much easier to use than \isa{PrimReplace}; it defines the
+same set, if $P(x,y)$ is single-valued.  The nice syntax for replacement
+expands to \isa{Replace}.
+
+Other consequences of replacement include replacement for 
+meta-level functions
+(\cdx{RepFun}) and definite descriptions (\cdx{The}).
+Axioms for separation (\cdx{Collect}) and unordered pairs
+(\cdx{Upair}) are traditionally assumed, but they actually follow
+from replacement~\cite[pages 237--8]{suppes72}.
+
+The definitions of general intersection, etc., are straightforward.  Note
+the definition of \isa{cons}, which underlies the finite set notation.
+The axiom of infinity gives us a set that contains~0 and is closed under
+successor (\cdx{succ}).  Although this set is not uniquely defined,
+the theory names it (\cdx{Inf}) in order to simplify the
+construction of the natural numbers.
+                                             
+Further definitions appear in Fig.\ts\ref{zf-defs}.  Ordered pairs are
+defined in the standard way, $\pair{a,b}\equiv\{\{a\},\{a,b\}\}$.  Recall
+that \cdx{Sigma}$(A,B)$ generalizes the Cartesian product of two
+sets.  It is defined to be the union of all singleton sets
+$\{\pair{x,y}\}$, for $x\in A$ and $y\in B(x)$.  This is a typical usage of
+general union.
+
+The projections \cdx{fst} and~\cdx{snd} are defined in terms of the
+generalized projection \cdx{split}.  The latter has been borrowed from
+Martin-L\"of's Type Theory, and is often easier to use than \cdx{fst}
+and~\cdx{snd}.
+
+Operations on relations include converse, domain, range, and image.  The
+set $\isa{Pi}(A,B)$ generalizes the space of functions between two sets.
+Note the simple definitions of $\lambda$-abstraction (using
+\cdx{RepFun}) and application (using a definite description).  The
+function \cdx{restrict}$(f,A)$ has the same values as~$f$, but only
+over the domain~$A$.
+
+
+%%%% zf.thy
+
+\begin{figure}
+\begin{alltt*}\isastyleminor
+\tdx{ballI}:     [| !!x. x\isasymin{}A ==> P(x) |] ==> {\isasymforall}x\isasymin{}A. P(x)
+\tdx{bspec}:     [| {\isasymforall}x\isasymin{}A. P(x);  x\isasymin{}A |] ==> P(x)
+\tdx{ballE}:     [| {\isasymforall}x\isasymin{}A. P(x);  P(x) ==> Q;  x \isasymnotin A ==> Q |] ==> Q
+
+\tdx{ball_cong}:  [| A=A';  !!x. x\isasymin{}A' ==> P(x) <-> P'(x) |] ==> 
+             ({\isasymforall}x\isasymin{}A. P(x)) <-> ({\isasymforall}x\isasymin{}A'. P'(x))
+
+\tdx{bexI}:      [| P(x);  x\isasymin{}A |] ==> {\isasymexists}x\isasymin{}A. P(x)
+\tdx{bexCI}:     [| {\isasymforall}x\isasymin{}A. ~P(x) ==> P(a);  a\isasymin{}A |] ==> {\isasymexists}x\isasymin{}A. P(x)
+\tdx{bexE}:      [| {\isasymexists}x\isasymin{}A. P(x);  !!x. [| x\isasymin{}A; P(x) |] ==> Q |] ==> Q
+
+\tdx{bex_cong}:  [| A=A';  !!x. x\isasymin{}A' ==> P(x) <-> P'(x) |] ==> 
+             ({\isasymexists}x\isasymin{}A. P(x)) <-> ({\isasymexists}x\isasymin{}A'. P'(x))
+\subcaption{Bounded quantifiers}
+
+\tdx{subsetI}:     (!!x. x \isasymin A ==> x \isasymin B) ==> A \isasymsubseteq B
+\tdx{subsetD}:     [| A \isasymsubseteq B;  c \isasymin A |] ==> c \isasymin B
+\tdx{subsetCE}:    [| A \isasymsubseteq B;  c \isasymnotin A ==> P;  c \isasymin B ==> P |] ==> P
+\tdx{subset_refl}:  A \isasymsubseteq A
+\tdx{subset_trans}: [| A \isasymsubseteq B;  B \isasymsubseteq C |] ==> A \isasymsubseteq C
+
+\tdx{equalityI}:   [| A \isasymsubseteq B;  B \isasymsubseteq A |] ==> A = B
+\tdx{equalityD1}:  A = B ==> A \isasymsubseteq B
+\tdx{equalityD2}:  A = B ==> B \isasymsubseteq A
+\tdx{equalityE}:   [| A = B;  [| A \isasymsubseteq B; B \isasymsubseteq A |] ==> P |]  ==>  P
+\subcaption{Subsets and extensionality}
+
+\tdx{emptyE}:        a \isasymin 0 ==> P
+\tdx{empty_subsetI}:  0 \isasymsubseteq A
+\tdx{equals0I}:      [| !!y. y \isasymin A ==> False |] ==> A=0
+\tdx{equals0D}:      [| A=0;  a \isasymin A |] ==> P
+
+\tdx{PowI}:          A \isasymsubseteq B ==> A \isasymin Pow(B)
+\tdx{PowD}:          A \isasymin Pow(B)  ==>  A \isasymsubseteq B
+\subcaption{The empty set; power sets}
+\end{alltt*}
+\caption{Basic derived rules for ZF} \label{zf-lemmas1}
+\end{figure}
+
+
+\section{From basic lemmas to function spaces}
+Faced with so many definitions, it is essential to prove lemmas.  Even
+trivial theorems like $A \int B = B \int A$ would be difficult to
+prove from the definitions alone.  Isabelle's set theory derives many
+rules using a natural deduction style.  Ideally, a natural deduction
+rule should introduce or eliminate just one operator, but this is not
+always practical.  For most operators, we may forget its definition
+and use its derived rules instead.
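+
+For example, once the derived rules are installed, the equation mentioned
+above is a one-step job for the classical reasoner; a minimal sketch:
+\begin{ttbox}\isastyleminor
+lemma "A \isasyminter B = B \isasyminter A"
+by blast
+\end{ttbox}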
+
+\subsection{Fundamental lemmas}
+Figure~\ref{zf-lemmas1} presents the derived rules for the most basic
+operators.  The rules for the bounded quantifiers resemble those for the
+ordinary quantifiers, but note that \tdx{ballE} uses a negated assumption
+in the style of Isabelle's classical reasoner.  The \rmindex{congruence
+  rules} \tdx{ball_cong} and \tdx{bex_cong} are required by Isabelle's
+simplifier, but have few other uses.  Congruence rules must be specially
+derived for all binding operators, and henceforth will not be shown.
+
+Figure~\ref{zf-lemmas1} also shows rules for the subset and equality
+relations (proof by extensionality), and rules about the empty set and the
+power set operator.
+
+Figure~\ref{zf-lemmas2} presents rules for replacement and separation.
+The rules for \cdx{Replace} and \cdx{RepFun} are much simpler than
+comparable rules for \isa{PrimReplace} would be.  The principle of
+separation is proved explicitly, although most proofs should use the
+natural deduction rules for \isa{Collect}.  The elimination rule
+\tdx{CollectE} is equivalent to the two destruction rules
+\tdx{CollectD1} and \tdx{CollectD2}, but each rule is suited to
+particular circumstances.  Although too many rules can be confusing, there
+is no reason to aim for a minimal set of rules.  
+
+Figure~\ref{zf-lemmas3} presents rules for general union and intersection.
+The empty intersection should be undefined.  We cannot have
+$\bigcap(\emptyset)=V$ because $V$, the universal class, is not a set.  All
+expressions denote something in ZF set theory; the definition of
+intersection implies $\bigcap(\emptyset)=\emptyset$, but this value is
+arbitrary.  The rule \tdx{InterI} must have a premise to exclude
+the empty intersection.  Some of the laws governing intersections require
+similar premises.
+
+
+%the [p] gives better page breaking for the book
+\begin{figure}[p]
+\begin{alltt*}\isastyleminor
+\tdx{ReplaceI}:   [| x\isasymin{}A;  P(x,b);  !!y. P(x,y) ==> y=b |] ==> 
+            b\isasymin{}{\ttlbrace}y. x\isasymin{}A, P(x,y){\ttrbrace}
+
+\tdx{ReplaceE}:   [| b\isasymin{}{\ttlbrace}y. x\isasymin{}A, P(x,y){\ttrbrace};  
+               !!x. [| x\isasymin{}A; P(x,b); {\isasymforall}y. P(x,y)-->y=b |] ==> R 
+            |] ==> R
+
+\tdx{RepFunI}:    [| a\isasymin{}A |] ==> f(a)\isasymin{}{\ttlbrace}f(x). x\isasymin{}A{\ttrbrace}
+\tdx{RepFunE}:    [| b\isasymin{}{\ttlbrace}f(x). x\isasymin{}A{\ttrbrace};  
+                !!x.[| x\isasymin{}A;  b=f(x) |] ==> P |] ==> P
+
+\tdx{separation}:  a\isasymin{}{\ttlbrace}x\isasymin{}A. P(x){\ttrbrace} <-> a\isasymin{}A & P(a)
+\tdx{CollectI}:    [| a\isasymin{}A;  P(a) |] ==> a\isasymin{}{\ttlbrace}x\isasymin{}A. P(x){\ttrbrace}
+\tdx{CollectE}:    [| a\isasymin{}{\ttlbrace}x\isasymin{}A. P(x){\ttrbrace};  [| a\isasymin{}A; P(a) |] ==> R |] ==> R
+\tdx{CollectD1}:   a\isasymin{}{\ttlbrace}x\isasymin{}A. P(x){\ttrbrace} ==> a\isasymin{}A
+\tdx{CollectD2}:   a\isasymin{}{\ttlbrace}x\isasymin{}A. P(x){\ttrbrace} ==> P(a)
+\end{alltt*}
+\caption{Replacement and separation} \label{zf-lemmas2}
+\end{figure}
+
+
+\begin{figure}
+\begin{alltt*}\isastyleminor
+\tdx{UnionI}: [| B\isasymin{}C;  A\isasymin{}B |] ==> A\isasymin{}Union(C)
+\tdx{UnionE}: [| A\isasymin{}Union(C);  !!B.[| A\isasymin{}B;  B\isasymin{}C |] ==> R |] ==> R
+
+\tdx{InterI}: [| !!x. x\isasymin{}C ==> A\isasymin{}x;  c\isasymin{}C |] ==> A\isasymin{}Inter(C)
+\tdx{InterD}: [| A\isasymin{}Inter(C);  B\isasymin{}C |] ==> A\isasymin{}B
+\tdx{InterE}: [| A\isasymin{}Inter(C);  A\isasymin{}B ==> R;  B \isasymnotin C ==> R |] ==> R
+
+\tdx{UN_I}:   [| a\isasymin{}A;  b\isasymin{}B(a) |] ==> b\isasymin{}({\isasymUnion}x\isasymin{}A. B(x))
+\tdx{UN_E}:   [| b\isasymin{}({\isasymUnion}x\isasymin{}A. B(x));  !!x.[| x\isasymin{}A;  b\isasymin{}B(x) |] ==> R 
+           |] ==> R
+
+\tdx{INT_I}:  [| !!x. x\isasymin{}A ==> b\isasymin{}B(x);  a\isasymin{}A |] ==> b\isasymin{}({\isasymInter}x\isasymin{}A. B(x))
+\tdx{INT_E}:  [| b\isasymin{}({\isasymInter}x\isasymin{}A. B(x));  a\isasymin{}A |] ==> b\isasymin{}B(a)
+\end{alltt*}
+\caption{General union and intersection} \label{zf-lemmas3}
+\end{figure}
+
+
+%%% upair.thy
+
+\begin{figure}
+\begin{alltt*}\isastyleminor
+\tdx{pairing}:   a\isasymin{}Upair(b,c) <-> (a=b | a=c)
+\tdx{UpairI1}:   a\isasymin{}Upair(a,b)
+\tdx{UpairI2}:   b\isasymin{}Upair(a,b)
+\tdx{UpairE}:    [| a\isasymin{}Upair(b,c);  a=b ==> P;  a=c ==> P |] ==> P
+\end{alltt*}
+\caption{Unordered pairs} \label{zf-upair1}
+\end{figure}
+
+
+\begin{figure}
+\begin{alltt*}\isastyleminor
+\tdx{UnI1}:      c\isasymin{}A ==> c\isasymin{}A \isasymunion B
+\tdx{UnI2}:      c\isasymin{}B ==> c\isasymin{}A \isasymunion B
+\tdx{UnCI}:      (c \isasymnotin B ==> c\isasymin{}A) ==> c\isasymin{}A \isasymunion B
+\tdx{UnE}:       [| c\isasymin{}A \isasymunion B;  c\isasymin{}A ==> P;  c\isasymin{}B ==> P |] ==> P
+
+\tdx{IntI}:      [| c\isasymin{}A;  c\isasymin{}B |] ==> c\isasymin{}A \isasyminter B
+\tdx{IntD1}:     c\isasymin{}A \isasyminter B ==> c\isasymin{}A
+\tdx{IntD2}:     c\isasymin{}A \isasyminter B ==> c\isasymin{}B
+\tdx{IntE}:      [| c\isasymin{}A \isasyminter B;  [| c\isasymin{}A; c\isasymin{}B |] ==> P |] ==> P
+
+\tdx{DiffI}:     [| c\isasymin{}A;  c \isasymnotin B |] ==> c\isasymin{}A - B
+\tdx{DiffD1}:    c\isasymin{}A - B ==> c\isasymin{}A
+\tdx{DiffD2}:    c\isasymin{}A - B ==> c  \isasymnotin  B
+\tdx{DiffE}:     [| c\isasymin{}A - B;  [| c\isasymin{}A; c \isasymnotin B |] ==> P |] ==> P
+\end{alltt*}
+\caption{Union, intersection, difference} \label{zf-Un}
+\end{figure}
+
+
+\begin{figure}
+\begin{alltt*}\isastyleminor
+\tdx{consI1}:    a\isasymin{}cons(a,B)
+\tdx{consI2}:    a\isasymin{}B ==> a\isasymin{}cons(b,B)
+\tdx{consCI}:    (a \isasymnotin B ==> a=b) ==> a\isasymin{}cons(b,B)
+\tdx{consE}:     [| a\isasymin{}cons(b,A);  a=b ==> P;  a\isasymin{}A ==> P |] ==> P
+
+\tdx{singletonI}:  a\isasymin{}{\ttlbrace}a{\ttrbrace}
+\tdx{singletonE}:  [| a\isasymin{}{\ttlbrace}b{\ttrbrace}; a=b ==> P |] ==> P
+\end{alltt*}
+\caption{Finite and singleton sets} \label{zf-upair2}
+\end{figure}
+
+
+\begin{figure}
+\begin{alltt*}\isastyleminor
+\tdx{succI1}:    i\isasymin{}succ(i)
+\tdx{succI2}:    i\isasymin{}j ==> i\isasymin{}succ(j)
+\tdx{succCI}:    (i \isasymnotin j ==> i=j) ==> i\isasymin{}succ(j)
+\tdx{succE}:     [| i\isasymin{}succ(j);  i=j ==> P;  i\isasymin{}j ==> P |] ==> P
+\tdx{succ_neq_0}:  [| succ(n)=0 |] ==> P
+\tdx{succ_inject}: succ(m) = succ(n) ==> m=n
+\end{alltt*}
+\caption{The successor function} \label{zf-succ}
+\end{figure}
+
+
+\begin{figure}
+\begin{alltt*}\isastyleminor
+\tdx{the_equality}: [| P(a); !!x. P(x) ==> x=a |] ==> (THE x. P(x))=a
+\tdx{theI}:         \isasymexists! x. P(x) ==> P(THE x. P(x))
+
+\tdx{if_P}:          P ==> (if P then a else b) = a
+\tdx{if_not_P}:     ~P ==> (if P then a else b) = b
+
+\tdx{mem_asym}:     [| a\isasymin{}b;  b\isasymin{}a |] ==> P
+\tdx{mem_irrefl}:   a\isasymin{}a ==> P
+\end{alltt*}
+\caption{Descriptions; non-circularity} \label{zf-the}
+\end{figure}
+
+
+\subsection{Unordered pairs and finite sets}
+Figure~\ref{zf-upair1} presents the principle of unordered pairing, along
+with its derived rules.  Binary union and intersection are defined in terms
+of unordered pairs (Fig.\ts\ref{zf-Un}).  Set difference is also included.  The
+rule \tdx{UnCI} is useful for classical reasoning about unions,
+like \isa{disjCI}\@; it supersedes \tdx{UnI1} and
+\tdx{UnI2}, but these rules are often easier to work with.  For
+intersection and difference we have both elimination and destruction rules.
+Again, there is no reason to provide a minimal rule set.
+
+Figure~\ref{zf-upair2} is concerned with finite sets: it presents rules
+for~\isa{cons}, the finite set constructor, and rules for singleton
+sets.  Figure~\ref{zf-succ} presents derived rules for the successor
+function, which is defined in terms of~\isa{cons}.  The proof that 
+\isa{succ} is injective appears to require the Axiom of Foundation.
+
+Definite descriptions (\sdx{THE}) are defined in terms of the singleton
+set~$\{0\}$, but their derived rules fortunately hide this
+(Fig.\ts\ref{zf-the}).  The rule~\tdx{theI} is difficult to apply
+because of the two occurrences of~$\Var{P}$.  However,
+\tdx{the_equality} does not have this problem and the files contain
+many examples of its use.
+
+Finally, the impossibility of having both $a\in b$ and $b\in a$
+(\tdx{mem_asym}) is proved by applying the Axiom of Foundation to
+the set $\{a,b\}$.  The impossibility of $a\in a$ is a trivial consequence.
+
+
+%%% subset.thy?
+
+\begin{figure}
+\begin{alltt*}\isastyleminor
+\tdx{Union_upper}:    B\isasymin{}A ==> B \isasymsubseteq Union(A)
+\tdx{Union_least}:    [| !!x. x\isasymin{}A ==> x \isasymsubseteq C |] ==> Union(A) \isasymsubseteq C
+
+\tdx{Inter_lower}:    B\isasymin{}A ==> Inter(A) \isasymsubseteq B
+\tdx{Inter_greatest}: [| a\isasymin{}A; !!x. x\isasymin{}A ==> C \isasymsubseteq x |] ==> C\isasymsubseteq{}Inter(A)
+
+\tdx{Un_upper1}:      A \isasymsubseteq A \isasymunion B
+\tdx{Un_upper2}:      B \isasymsubseteq A \isasymunion B
+\tdx{Un_least}:       [| A \isasymsubseteq C;  B \isasymsubseteq C |] ==> A \isasymunion B \isasymsubseteq C
+
+\tdx{Int_lower1}:     A \isasyminter B \isasymsubseteq A
+\tdx{Int_lower2}:     A \isasyminter B \isasymsubseteq B
+\tdx{Int_greatest}:   [| C \isasymsubseteq A;  C \isasymsubseteq B |] ==> C \isasymsubseteq A \isasyminter B
+
+\tdx{Diff_subset}:    A-B \isasymsubseteq A
+\tdx{Diff_contains}:  [| C \isasymsubseteq A;  C \isasyminter B = 0 |] ==> C \isasymsubseteq A-B
+
+\tdx{Collect_subset}: Collect(A,P) \isasymsubseteq A
+\end{alltt*}
+\caption{Subset and lattice properties} \label{zf-subset}
+\end{figure}
+
+
+\subsection{Subset and lattice properties}
+Sets ordered by the subset relation form a complete lattice.  Unions give least
+upper bounds; non-empty intersections give greatest lower bounds.  Figure~\ref{zf-subset}
+shows the corresponding rules.  A few other laws involving subsets are
+included. 
+Reasoning directly about subsets often yields clearer proofs than
+reasoning about the membership relation.  Section~\ref{sec:ZF-pow-example}
+below presents an example of this, proving the equation 
+${\isa{Pow}(A)\cap \isa{Pow}(B)}= \isa{Pow}(A\cap B)$.
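+
+That section develops the proof by subset reasoning; as a quick contrast, the
+following sketch simply lets the classical reasoner prove the equation in one
+step:
+\begin{ttbox}\isastyleminor
+lemma "Pow(A) \isasyminter Pow(B) = Pow(A \isasyminter B)"
+by blast
+\end{ttbox}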
+
+%%% pair.thy
+
+\begin{figure}
+\begin{alltt*}\isastyleminor
+\tdx{Pair_inject1}: <a,b> = <c,d> ==> a=c
+\tdx{Pair_inject2}: <a,b> = <c,d> ==> b=d
+\tdx{Pair_inject}:  [| <a,b> = <c,d>;  [| a=c; b=d |] ==> P |] ==> P
+\tdx{Pair_neq_0}:   <a,b>=0 ==> P
+
+\tdx{fst_conv}:     fst(<a,b>) = a
+\tdx{snd_conv}:     snd(<a,b>) = b
+\tdx{split}:        split(\%x y. c(x,y), <a,b>) = c(a,b)
+
+\tdx{SigmaI}:     [| a\isasymin{}A;  b\isasymin{}B(a) |] ==> <a,b>\isasymin{}Sigma(A,B)
+
+\tdx{SigmaE}:     [| c\isasymin{}Sigma(A,B);  
+                !!x y.[| x\isasymin{}A; y\isasymin{}B(x); c=<x,y> |] ==> P |] ==> P
+
+\tdx{SigmaE2}:    [| <a,b>\isasymin{}Sigma(A,B);    
+                [| a\isasymin{}A;  b\isasymin{}B(a) |] ==> P   |] ==> P
+\end{alltt*}
+\caption{Ordered pairs; projections; general sums} \label{zf-pair}
+\end{figure}
+
+
+\subsection{Ordered pairs} \label{sec:pairs}
+
+Figure~\ref{zf-pair} presents the rules governing ordered pairs,
+projections and general sums --- in particular, that
+$\{\{a\},\{a,b\}\}$ functions as an ordered pair.  This property is
+expressed as two destruction rules,
+\tdx{Pair_inject1} and \tdx{Pair_inject2}, and equivalently
+as the elimination rule \tdx{Pair_inject}.
+
+The rule \tdx{Pair_neq_0} asserts $\pair{a,b}\neq\emptyset$.  This
+is a property of $\{\{a\},\{a,b\}\}$, and need not hold for other 
+encodings of ordered pairs.  The non-standard ordered pairs mentioned below
+satisfy $\pair{\emptyset;\emptyset}=\emptyset$.
+
+The natural deduction rules \tdx{SigmaI} and \tdx{SigmaE}
+assert that \cdx{Sigma}$(A,B)$ consists of all pairs of the form
+$\pair{x,y}$, for $x\in A$ and $y\in B(x)$.  The rule \tdx{SigmaE2}
+merely states that $\pair{a,b}\in \isa{Sigma}(A,B)$ implies $a\in A$ and
+$b\in B(a)$.
+
+In addition, it is possible to use tuples as patterns in abstractions:
+\begin{center}
+{\tt\%<$x$,$y$>. $t$} \quad stands for\quad \isa{split(\%$x$ $y$.\ $t$)}
+\end{center}
+Nested patterns are translated recursively:
+{\tt\%<$x$,$y$,$z$>. $t$} $\leadsto$ {\tt\%<$x$,<$y$,$z$>>. $t$} $\leadsto$
+\isa{split(\%$x$.\%<$y$,$z$>. $t$)} $\leadsto$ \isa{split(\%$x$. split(\%$y$
+  $z$.\ $t$))}.  The reverse translation is performed upon printing.
+\begin{warn}
+  The translation between patterns and \isa{split} is performed automatically
+  by the parser and printer.  Thus the internal and external form of a term
+  may differ, which affects proofs.  For example the term \isa{(\%<x,y>.<y,x>)<a,b>} requires the theorem \isa{split} to rewrite to
+  {\tt<b,a>}.
+\end{warn}
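+A minimal sketch of that rewriting step, stated in the internal \isa{split}
+form so that it is independent of the pattern syntax:
+\begin{ttbox}\isastyleminor
+lemma "split(\%x y. <y,x>, <a,b>) = <b,a>"
+by simp
+\end{ttbox}
+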
+In addition to explicit $\lambda$-abstractions, patterns can be used in any
+variable binding construct which is internally described by a
+$\lambda$-abstraction.  Here are some important examples:
+\begin{description}
+\item[Let:] \isa{let {\it pattern} = $t$ in $u$}
+\item[Choice:] \isa{THE~{\it pattern}~.~$P$}
+\item[Set operations:] \isa{\isasymUnion~{\it pattern}:$A$.~$B$}
+\item[Comprehension:] \isa{{\ttlbrace}~{\it pattern}:$A$~.~$P$~{\ttrbrace}}
+\end{description}
+
+
+%%% domrange.thy?
+
+\begin{figure}
+\begin{alltt*}\isastyleminor
+\tdx{domainI}:     <a,b>\isasymin{}r ==> a\isasymin{}domain(r)
+\tdx{domainE}:     [| a\isasymin{}domain(r); !!y. <a,y>\isasymin{}r ==> P |] ==> P
+\tdx{domain_subset}: domain(Sigma(A,B)) \isasymsubseteq A
+
+\tdx{rangeI}:      <a,b>\isasymin{}r ==> b\isasymin{}range(r)
+\tdx{rangeE}:      [| b\isasymin{}range(r); !!x. <x,b>\isasymin{}r ==> P |] ==> P
+\tdx{range_subset}: range(A*B) \isasymsubseteq B
+
+\tdx{fieldI1}:     <a,b>\isasymin{}r ==> a\isasymin{}field(r)
+\tdx{fieldI2}:     <a,b>\isasymin{}r ==> b\isasymin{}field(r)
+\tdx{fieldCI}:     (<c,a> \isasymnotin r ==> <a,b>\isasymin{}r) ==> a\isasymin{}field(r)
+
+\tdx{fieldE}:      [| a\isasymin{}field(r); 
+                !!x. <a,x>\isasymin{}r ==> P; 
+                !!x. <x,a>\isasymin{}r ==> P      
+             |] ==> P
+
+\tdx{field_subset}:  field(A*A) \isasymsubseteq A
+\end{alltt*}
+\caption{Domain, range and field of a relation} \label{zf-domrange}
+\end{figure}
+
+\begin{figure}
+\begin{alltt*}\isastyleminor
+\tdx{imageI}:      [| <a,b>\isasymin{}r; a\isasymin{}A |] ==> b\isasymin{}r``A
+\tdx{imageE}:      [| b\isasymin{}r``A; !!x.[| <x,b>\isasymin{}r; x\isasymin{}A |] ==> P |] ==> P
+
+\tdx{vimageI}:     [| <a,b>\isasymin{}r; b\isasymin{}B |] ==> a\isasymin{}r-``B
+\tdx{vimageE}:     [| a\isasymin{}r-``B; !!x.[| <a,x>\isasymin{}r;  x\isasymin{}B |] ==> P |] ==> P
+\end{alltt*}
+\caption{Image and inverse image} \label{zf-domrange2}
+\end{figure}
+
+
+\subsection{Relations}
+Figure~\ref{zf-domrange} presents rules involving relations, which are sets
+of ordered pairs.  The converse of a relation~$r$ is the set of all pairs
+$\pair{y,x}$ such that $\pair{x,y}\in r$; if $r$ is a function, then
+{\cdx{converse}$(r)$} is its inverse.  The rules for the domain
+operation, namely \tdx{domainI} and~\tdx{domainE}, assert that
+\cdx{domain}$(r)$ consists of all~$x$ such that $r$ contains
+some pair of the form~$\pair{x,y}$.  The range operation is similar, and
+the field of a relation is merely the union of its domain and range.  
+
+Figure~\ref{zf-domrange2} presents rules for images and inverse images.
+Note that these operations are generalisations of range and domain,
+respectively. 
+
+
+%%% func.thy
+
+\begin{figure}
+\begin{alltt*}\isastyleminor
+\tdx{fun_is_rel}:     f\isasymin{}Pi(A,B) ==> f \isasymsubseteq Sigma(A,B)
+
+\tdx{apply_equality}:  [| <a,b>\isasymin{}f; f\isasymin{}Pi(A,B) |] ==> f`a = b
+\tdx{apply_equality2}: [| <a,b>\isasymin{}f; <a,c>\isasymin{}f; f\isasymin{}Pi(A,B) |] ==> b=c
+
+\tdx{apply_type}:     [| f\isasymin{}Pi(A,B); a\isasymin{}A |] ==> f`a\isasymin{}B(a)
+\tdx{apply_Pair}:     [| f\isasymin{}Pi(A,B); a\isasymin{}A |] ==> <a,f`a>\isasymin{}f
+\tdx{apply_iff}:      f\isasymin{}Pi(A,B) ==> <a,b>\isasymin{}f <-> a\isasymin{}A & f`a = b
+
+\tdx{fun_extension}:  [| f\isasymin{}Pi(A,B); g\isasymin{}Pi(A,D);
+                   !!x. x\isasymin{}A ==> f`x = g`x     |] ==> f=g
+
+\tdx{domain_type}:    [| <a,b>\isasymin{}f; f\isasymin{}Pi(A,B) |] ==> a\isasymin{}A
+\tdx{range_type}:     [| <a,b>\isasymin{}f; f\isasymin{}Pi(A,B) |] ==> b\isasymin{}B(a)
+
+\tdx{Pi_type}:        [| f\isasymin{}A->C; !!x. x\isasymin{}A ==> f`x\isasymin{}B(x) |] ==> f\isasymin{}Pi(A,B)
+\tdx{domain_of_fun}:  f\isasymin{}Pi(A,B) ==> domain(f)=A
+\tdx{range_of_fun}:   f\isasymin{}Pi(A,B) ==> f\isasymin{}A->range(f)
+
+\tdx{restrict}:       a\isasymin{}A ==> restrict(f,A) ` a = f`a
+\tdx{restrict_type}:  [| !!x. x\isasymin{}A ==> f`x\isasymin{}B(x) |] ==> 
+                restrict(f,A)\isasymin{}Pi(A,B)
+\end{alltt*}
+\caption{Functions} \label{zf-func1}
+\end{figure}
+
+
+\begin{figure}
+\begin{alltt*}\isastyleminor
+\tdx{lamI}:     a\isasymin{}A ==> <a,b(a)>\isasymin{}(lam x\isasymin{}A. b(x))
+\tdx{lamE}:     [| p\isasymin{}(lam x\isasymin{}A. b(x)); !!x.[| x\isasymin{}A; p=<x,b(x)> |] ==> P 
+          |] ==>  P
+
+\tdx{lam_type}: [| !!x. x\isasymin{}A ==> b(x)\isasymin{}B(x) |] ==> (lam x\isasymin{}A. b(x))\isasymin{}Pi(A,B)
+
+\tdx{beta}:     a\isasymin{}A ==> (lam x\isasymin{}A. b(x)) ` a = b(a)
+\tdx{eta}:      f\isasymin{}Pi(A,B) ==> (lam x\isasymin{}A. f`x) = f
+\end{alltt*}
+\caption{$\lambda$-abstraction} \label{zf-lam}
+\end{figure}
+
+
+\begin{figure}
+\begin{alltt*}\isastyleminor
+\tdx{fun_empty}:           0\isasymin{}0->0
+\tdx{fun_single}:          {\ttlbrace}<a,b>{\ttrbrace}\isasymin{}{\ttlbrace}a{\ttrbrace} -> {\ttlbrace}b{\ttrbrace}
+
+\tdx{fun_disjoint_Un}:     [| f\isasymin{}A->B; g\isasymin{}C->D; A \isasyminter C = 0  |] ==>  
+                     (f \isasymunion g)\isasymin{}(A \isasymunion C) -> (B \isasymunion D)
+
+\tdx{fun_disjoint_apply1}: [| a\isasymin{}A; f\isasymin{}A->B; g\isasymin{}C->D;  A\isasyminter{}C = 0 |] ==>  
+                     (f \isasymunion g)`a = f`a
+
+\tdx{fun_disjoint_apply2}: [| c\isasymin{}C; f\isasymin{}A->B; g\isasymin{}C->D;  A\isasyminter{}C = 0 |] ==>  
+                     (f \isasymunion g)`c = g`c
+\end{alltt*}
+\caption{Constructing functions from smaller sets} \label{zf-func2}
+\end{figure}
+
+
+\subsection{Functions}
+Functions, represented by graphs, are notoriously difficult to reason
+about.  The ZF theory provides many derived rules, which overlap more
+than they ought.  This section presents the more important rules.
+
+Figure~\ref{zf-func1} presents the basic properties of \cdx{Pi}$(A,B)$,
+the generalized function space.  For example, if $f$ is a function and
+$\pair{a,b}\in f$, then $f`a=b$ (\tdx{apply_equality}).  Two functions
+are equal provided they have equal domains and deliver equal results
+(\tdx{fun_extension}).
+
+By \tdx{Pi_type}, a function typing of the form $f\in A\to C$ can be
+refined to the dependent typing $f\in\prod@{x\in A}B(x)$, given a suitable
+family of sets $\{B(x)\}@{x\in A}$.  Conversely, by \tdx{range_of_fun},
+any dependent typing can be flattened to yield a function type of the form
+$A\to C$; here, $C=\isa{range}(f)$.
+
+Among the laws for $\lambda$-abstraction, \tdx{lamI} and \tdx{lamE}
+describe the graph of the generated function, while \tdx{beta} and
+\tdx{eta} are the standard conversions.  We essentially have a
+dependently-typed $\lambda$-calculus (Fig.\ts\ref{zf-lam}).
+
+Figure~\ref{zf-func2} presents some rules that can be used to construct
+functions explicitly.  We start with functions consisting of at most one
+pair, and may form the union of two functions provided their domains are
+disjoint.  
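+
+For instance, using the rules named above, the application of a singleton
+function can be computed directly; a small illustrative sketch:
+\begin{ttbox}\isastyleminor
+lemma "{\ttlbrace}<a,b>{\ttrbrace} ` a = b"
+by (rule apply_equality, rule singletonI, rule fun_single)
+\end{ttbox}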
+
+
+\begin{figure}
+\begin{alltt*}\isastyleminor
+\tdx{Int_absorb}:        A \isasyminter A = A
+\tdx{Int_commute}:       A \isasyminter B = B \isasyminter A
+\tdx{Int_assoc}:         (A \isasyminter B) \isasyminter C  =  A \isasyminter (B \isasyminter C)
+\tdx{Int_Un_distrib}:    (A \isasymunion B) \isasyminter C  =  (A \isasyminter C) \isasymunion (B \isasyminter C)
+
+\tdx{Un_absorb}:         A \isasymunion A = A
+\tdx{Un_commute}:        A \isasymunion B = B \isasymunion A
+\tdx{Un_assoc}:          (A \isasymunion B) \isasymunion C  =  A \isasymunion (B \isasymunion C)
+\tdx{Un_Int_distrib}:    (A \isasyminter B) \isasymunion C  =  (A \isasymunion C) \isasyminter (B \isasymunion C)
+
+\tdx{Diff_cancel}:       A-A = 0
+\tdx{Diff_disjoint}:     A \isasyminter (B-A) = 0
+\tdx{Diff_partition}:    A \isasymsubseteq B ==> A \isasymunion (B-A) = B
+\tdx{double_complement}: [| A \isasymsubseteq B; B \isasymsubseteq C |] ==> (B - (C-A)) = A
+\tdx{Diff_Un}:           A - (B \isasymunion C) = (A-B) \isasyminter (A-C)
+\tdx{Diff_Int}:          A - (B \isasyminter C) = (A-B) \isasymunion (A-C)
+
+\tdx{Union_Un_distrib}:  Union(A \isasymunion B) = Union(A) \isasymunion Union(B)
+\tdx{Inter_Un_distrib}:  [| a \isasymin A;  b \isasymin B |] ==> 
+                   Inter(A \isasymunion B) = Inter(A) \isasyminter Inter(B)
+
+\tdx{Int_Union_RepFun}:  A \isasyminter Union(B) = ({\isasymUnion}C \isasymin B. A \isasyminter C)
+
+\tdx{Un_Inter_RepFun}:   b \isasymin B ==> 
+                   A \isasymunion Inter(B) = ({\isasymInter}C \isasymin B. A \isasymunion C)
+
+\tdx{SUM_Un_distrib1}:   (SUM x \isasymin A \isasymunion B. C(x)) = 
+                   (SUM x \isasymin A. C(x)) \isasymunion (SUM x \isasymin B. C(x))
+
+\tdx{SUM_Un_distrib2}:   (SUM x \isasymin C. A(x) \isasymunion B(x)) =
+                   (SUM x \isasymin C. A(x)) \isasymunion (SUM x \isasymin C. B(x))
+
+\tdx{SUM_Int_distrib1}:  (SUM x \isasymin A \isasyminter B. C(x)) =
+                   (SUM x \isasymin A. C(x)) \isasyminter (SUM x \isasymin B. C(x))
+
+\tdx{SUM_Int_distrib2}:  (SUM x \isasymin C. A(x) \isasyminter B(x)) =
+                   (SUM x \isasymin C. A(x)) \isasyminter (SUM x \isasymin C. B(x))
+\end{alltt*}
+\caption{Equalities} \label{zf-equalities}
+\end{figure}
+
+
+\begin{figure}
+%\begin{constants} 
+%  \cdx{1}       & $i$           &       & $\{\emptyset\}$       \\
+%  \cdx{bool}    & $i$           &       & the set $\{\emptyset,1\}$     \\
+%  \cdx{cond}   & $[i,i,i]\To i$ &       & conditional for \isa{bool}    \\
+%  \cdx{not}    & $i\To i$       &       & negation for \isa{bool}       \\
+%  \sdx{and}    & $[i,i]\To i$   & Left 70 & conjunction for \isa{bool}  \\
+%  \sdx{or}     & $[i,i]\To i$   & Left 65 & disjunction for \isa{bool}  \\
+%  \sdx{xor}    & $[i,i]\To i$   & Left 65 & exclusive-or for \isa{bool}
+%\end{constants}
+%
+\begin{alltt*}\isastyleminor
+\tdx{bool_def}:      bool == {\ttlbrace}0,1{\ttrbrace}
+\tdx{cond_def}:      cond(b,c,d) == if b=1 then c else d
+\tdx{not_def}:       not(b)  == cond(b,0,1)
+\tdx{and_def}:       a and b == cond(a,b,0)
+\tdx{or_def}:        a or b  == cond(a,1,b)
+\tdx{xor_def}:       a xor b == cond(a,not(b),b)
+
+\tdx{bool_1I}:       1 \isasymin bool
+\tdx{bool_0I}:       0 \isasymin bool
+\tdx{boolE}:         [| c \isasymin bool;  c=1 ==> P;  c=0 ==> P |] ==> P
+\tdx{cond_1}:        cond(1,c,d) = c
+\tdx{cond_0}:        cond(0,c,d) = d
+\end{alltt*}
+\caption{The booleans} \label{zf-bool}
+\end{figure}
+
+
+\section{Further developments}
+The next group of developments is complex and extensive, and only
+highlights can be covered here.  It involves many theories and proofs. 
+
+Figure~\ref{zf-equalities} presents commutative, associative, distributive,
+and idempotency laws of union and intersection, along with other equations.
+
+Theory \thydx{Bool} defines $\{0,1\}$ as a set of booleans, with the usual
+operators including a conditional (Fig.\ts\ref{zf-bool}).  Although ZF is a
+first-order theory, you can obtain the effect of higher-order logic using
+\isa{bool}-valued functions, for example.  The constant~\isa{1} is
+translated to \isa{succ(0)}.
+
+\begin{figure}
+\index{*"+ symbol}
+\begin{constants}
+  \it symbol    & \it meta-type & \it priority & \it description \\ 
+  \tt +         & $[i,i]\To i$  &  Right 65     & disjoint union operator\\
+  \cdx{Inl}~~\cdx{Inr}  & $i\To i$      &       & injections\\
+  \cdx{case}    & $[i\To i,i\To i, i]\To i$ &   & conditional for $A+B$
+\end{constants}
+\begin{alltt*}\isastyleminor
+\tdx{sum_def}:   A+B == {\ttlbrace}0{\ttrbrace}*A \isasymunion {\ttlbrace}1{\ttrbrace}*B
+\tdx{Inl_def}:   Inl(a) == <0,a>
+\tdx{Inr_def}:   Inr(b) == <1,b>
+\tdx{case_def}:  case(c,d,u) == split(\%y z. cond(y, d(z), c(z)), u)
+
+\tdx{InlI}:      a \isasymin A ==> Inl(a) \isasymin A+B
+\tdx{InrI}:      b \isasymin B ==> Inr(b) \isasymin A+B
+
+\tdx{Inl_inject}:  Inl(a)=Inl(b) ==> a=b
+\tdx{Inr_inject}:  Inr(a)=Inr(b) ==> a=b
+\tdx{Inl_neq_Inr}: Inl(a)=Inr(b) ==> P
+
+\tdx{sum_iff}:  u \isasymin A+B <-> ({\isasymexists}x\isasymin{}A. u=Inl(x)) | ({\isasymexists}y\isasymin{}B. u=Inr(y))
+
+\tdx{case_Inl}:  case(c,d,Inl(a)) = c(a)
+\tdx{case_Inr}:  case(c,d,Inr(b)) = d(b)
+\end{alltt*}
+\caption{Disjoint unions} \label{zf-sum}
+\end{figure}
+
+
+\subsection{Disjoint unions}
+
+Theory \thydx{Sum} defines the disjoint union of two sets, with
+injections and a case analysis operator (Fig.\ts\ref{zf-sum}).  Disjoint
+unions play a role in datatype definitions, particularly when there is
+mutual recursion~\cite{paulson-set-II}.
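+
+For example, the injection rules combine with the rules for the booleans of
+the previous section; a minimal sketch:
+\begin{ttbox}\isastyleminor
+lemma "Inl(1) \isasymin bool + bool"
+by (rule InlI, rule bool_1I)
+\end{ttbox}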
+
+\begin{figure}
+\begin{alltt*}\isastyleminor
+\tdx{QPair_def}:      <a;b> == a+b
+\tdx{qsplit_def}:     qsplit(c,p)  == THE y. {\isasymexists}a b. p=<a;b> & y=c(a,b)
+\tdx{qfsplit_def}:    qfsplit(R,z) == {\isasymexists}x y. z=<x;y> & R(x,y)
+\tdx{qconverse_def}:  qconverse(r) == {\ttlbrace}z. w \isasymin r, {\isasymexists}x y. w=<x;y> & z=<y;x>{\ttrbrace}
+\tdx{QSigma_def}:     QSigma(A,B)  == {\isasymUnion}x \isasymin A. {\isasymUnion}y \isasymin B(x). {\ttlbrace}<x;y>{\ttrbrace}
+
+\tdx{qsum_def}:       A <+> B      == ({\ttlbrace}0{\ttrbrace} <*> A) \isasymunion ({\ttlbrace}1{\ttrbrace} <*> B)
+\tdx{QInl_def}:       QInl(a)      == <0;a>
+\tdx{QInr_def}:       QInr(b)      == <1;b>
+\tdx{qcase_def}:      qcase(c,d)   == qsplit(\%y z. cond(y, d(z), c(z)))
+\end{alltt*}
+\caption{Non-standard pairs, products and sums} \label{zf-qpair}
+\end{figure}
+
+
+\subsection{Non-standard ordered pairs}
+
+Theory \thydx{QPair} defines a notion of ordered pair that admits
+non-well-founded tupling (Fig.\ts\ref{zf-qpair}).  Such pairs are written
+{\tt<$a$;$b$>}.  It also defines the eliminator \cdx{qsplit}, the
+converse operator \cdx{qconverse}, and the summation operator
+\cdx{QSigma}.  These are completely analogous to the corresponding
+versions for standard ordered pairs.  The theory goes on to define a
+non-standard notion of disjoint sum using non-standard pairs.  All of these
+concepts satisfy the same properties as their standard counterparts; in
+addition, {\tt<$a$;$b$>} is continuous.  The theory supports coinductive
+definitions, for example of infinite lists~\cite{paulson-mscs}.
+
+\begin{figure}
+\begin{alltt*}\isastyleminor
+\tdx{bnd_mono_def}:  bnd_mono(D,h) == 
+               h(D)\isasymsubseteq{}D & ({\isasymforall}W X. W\isasymsubseteq{}X --> X\isasymsubseteq{}D --> h(W)\isasymsubseteq{}h(X))
+
+\tdx{lfp_def}:       lfp(D,h) == Inter({\ttlbrace}X \isasymin Pow(D). h(X) \isasymsubseteq X{\ttrbrace})
+\tdx{gfp_def}:       gfp(D,h) == Union({\ttlbrace}X \isasymin Pow(D). X \isasymsubseteq h(X){\ttrbrace})
+
+
+\tdx{lfp_lowerbound}: [| h(A) \isasymsubseteq A;  A \isasymsubseteq D |] ==> lfp(D,h) \isasymsubseteq A
+
+\tdx{lfp_subset}:    lfp(D,h) \isasymsubseteq D
+
+\tdx{lfp_greatest}:  [| bnd_mono(D,h);  
+                  !!X. [| h(X) \isasymsubseteq X;  X \isasymsubseteq D |] ==> A \isasymsubseteq X 
+               |] ==> A \isasymsubseteq lfp(D,h)
+
+\tdx{lfp_Tarski}:    bnd_mono(D,h) ==> lfp(D,h) = h(lfp(D,h))
+
+\tdx{induct}:        [| a \isasymin lfp(D,h);  bnd_mono(D,h);
+                  !!x. x \isasymin h(Collect(lfp(D,h),P)) ==> P(x)
+               |] ==> P(a)
+
+\tdx{lfp_mono}:      [| bnd_mono(D,h);  bnd_mono(E,i);
+                  !!X. X \isasymsubseteq D ==> h(X) \isasymsubseteq i(X)  
+               |] ==> lfp(D,h) \isasymsubseteq lfp(E,i)
+
+\tdx{gfp_upperbound}: [| A \isasymsubseteq h(A);  A \isasymsubseteq D |] ==> A \isasymsubseteq gfp(D,h)
+
+\tdx{gfp_subset}:    gfp(D,h) \isasymsubseteq D
+
+\tdx{gfp_least}:     [| bnd_mono(D,h);  
+                  !!X. [| X \isasymsubseteq h(X);  X \isasymsubseteq D |] ==> X \isasymsubseteq A
+               |] ==> gfp(D,h) \isasymsubseteq A
+
+\tdx{gfp_Tarski}:    bnd_mono(D,h) ==> gfp(D,h) = h(gfp(D,h))
+
+\tdx{coinduct}:      [| bnd_mono(D,h); a \isasymin X; X \isasymsubseteq h(X \isasymunion gfp(D,h)); X \isasymsubseteq D 
+               |] ==> a \isasymin gfp(D,h)
+
+\tdx{gfp_mono}:      [| bnd_mono(D,h);  D \isasymsubseteq E;
+                  !!X. X \isasymsubseteq D ==> h(X) \isasymsubseteq i(X)  
+               |] ==> gfp(D,h) \isasymsubseteq gfp(E,i)
+\end{alltt*}
+\caption{Least and greatest fixedpoints} \label{zf-fixedpt}
+\end{figure}
+
+
+\subsection{Least and greatest fixedpoints}
+
+The Knaster-Tarski Theorem states that every monotone function over a
+complete lattice has a fixedpoint.  Theory \thydx{Fixedpt} proves the
+Theorem only for a particular lattice, namely the lattice of subsets of a
+set (Fig.\ts\ref{zf-fixedpt}).  The theory defines least and greatest
+fixedpoint operators with corresponding induction and coinduction rules.
+These are essential to many definitions that follow, including the natural
+numbers and the transitive closure operator.  The (co)inductive definition
+package also uses the fixedpoint operators~\cite{paulson-CADE}.  See
+Davey and Priestley~\cite{davey-priestley} for more on the Knaster-Tarski
+Theorem and my paper~\cite{paulson-set-II} for discussion of the Isabelle
+proofs.
+
+Monotonicity properties are proved for most of the set-forming operations:
+union, intersection, Cartesian product, image, domain, range, etc.  These
+are useful for applying the Knaster-Tarski Fixedpoint Theorem.  The proofs
+themselves are trivial applications of Isabelle's classical reasoner. 
+
+
+\subsection{Finite sets and lists}
+
+Theory \texttt{Finite} (Figure~\ref{zf-fin}) defines the finite set operator;
+$\isa{Fin}(A)$ is the set of all finite sets over~$A$.  The theory employs
+Isabelle's inductive definition package, which proves various rules
+automatically.  The induction rule shown is stronger than the one proved by
+the package.  The theory also defines the set of all finite functions
+between two given sets.
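+
+For instance, a singleton drawn from~$A$ is a finite subset of~$A$; a small
+sketch using the rules shown below:
+\begin{ttbox}\isastyleminor
+lemma "a \isasymin A ==> {\ttlbrace}a{\ttrbrace} \isasymin Fin(A)"
+by (rule Fin.consI, assumption, rule Fin.emptyI)
+\end{ttbox}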
+
+\begin{figure}
+\begin{alltt*}\isastyleminor
+\tdx{Fin.emptyI}      0 \isasymin Fin(A)
+\tdx{Fin.consI}       [| a \isasymin A;  b \isasymin Fin(A) |] ==> cons(a,b) \isasymin Fin(A)
+
+\tdx{Fin_induct}
+    [| b \isasymin Fin(A);
+       P(0);
+       !!x y. [| x\isasymin{}A; y\isasymin{}Fin(A); x\isasymnotin{}y; P(y) |] ==> P(cons(x,y))
+    |] ==> P(b)
+
+\tdx{Fin_mono}:       A \isasymsubseteq B ==> Fin(A) \isasymsubseteq Fin(B)
+\tdx{Fin_UnI}:        [| b \isasymin Fin(A);  c \isasymin Fin(A) |] ==> b \isasymunion c \isasymin Fin(A)
+\tdx{Fin_UnionI}:     C \isasymin Fin(Fin(A)) ==> Union(C) \isasymin Fin(A)
+\tdx{Fin_subset}:     [| c \isasymsubseteq b;  b \isasymin Fin(A) |] ==> c \isasymin Fin(A)
+\end{alltt*}
+\caption{The finite set operator} \label{zf-fin}
+\end{figure}
+
+\begin{figure}
+\begin{constants}
+  \it symbol  & \it meta-type & \it priority & \it description \\ 
+  \cdx{list}    & $i\To i$      && lists over some set\\
+  \cdx{list_case} & $[i, [i,i]\To i, i] \To i$  && conditional for $list(A)$ \\
+  \cdx{map}     & $[i\To i, i] \To i$   &       & mapping functional\\
+  \cdx{length}  & $i\To i$              &       & length of a list\\
+  \cdx{rev}     & $i\To i$              &       & reverse of a list\\
+  \tt \at       & $[i,i]\To i$  &  Right 60     & append for lists\\
+  \cdx{flat}    & $i\To i$   &                  & append of list of lists
+\end{constants}
+
+\underscoreon %%because @ is used here
+\begin{alltt*}\isastyleminor
+\tdx{NilI}:       Nil \isasymin list(A)
+\tdx{ConsI}:      [| a \isasymin A;  l \isasymin list(A) |] ==> Cons(a,l) \isasymin list(A)
+
+\tdx{List.induct}
+    [| l \isasymin list(A);
+       P(Nil);
+       !!x y. [| x \isasymin A;  y \isasymin list(A);  P(y) |] ==> P(Cons(x,y))
+    |] ==> P(l)
+
+\tdx{Cons_iff}:       Cons(a,l)=Cons(a',l') <-> a=a' & l=l'
+\tdx{Nil_Cons_iff}:    Nil \isasymnoteq Cons(a,l)
+
+\tdx{list_mono}:      A \isasymsubseteq B ==> list(A) \isasymsubseteq list(B)
+
+\tdx{map_ident}:      l\isasymin{}list(A) ==> map(\%u. u, l) = l
+\tdx{map_compose}:    l\isasymin{}list(A) ==> map(h, map(j,l)) = map(\%u. h(j(u)), l)
+\tdx{map_app_distrib}: xs\isasymin{}list(A) ==> map(h, xs@ys) = map(h,xs)@map(h,ys)
+\tdx{map_type}
+    [| l\isasymin{}list(A); !!x. x\isasymin{}A ==> h(x)\isasymin{}B |] ==> map(h,l)\isasymin{}list(B)
+\tdx{map_flat}
+    ls: list(list(A)) ==> map(h, flat(ls)) = flat(map(map(h),ls))
+\end{alltt*}
+\caption{Lists} \label{zf-list}
+\end{figure}
+
+
+Figure~\ref{zf-list} presents the set of lists over~$A$, $\isa{list}(A)$.  The
+definition employs Isabelle's datatype package, which defines the introduction
+and induction rules automatically, as well as the constructors, case operator
+(\isa{list\_case}) and recursion operator.  The theory then defines the usual
+list functions by primitive recursion.  See theory \texttt{List}.
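+
+Assuming the recursion equations for \isa{map} are installed as rewrite
+rules, as equations from primitive recursive definitions normally are, small
+computations are immediate; an illustrative sketch:
+\begin{ttbox}\isastyleminor
+lemma "map(succ, [0, 0]) = [1, 1]"
+by simp
+\end{ttbox}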
+
+
+\subsection{Miscellaneous}
+
+\begin{figure}
+\begin{constants} 
+  \it symbol  & \it meta-type & \it priority & \it description \\ 
+  \sdx{O}       & $[i,i]\To i$  &  Right 60     & composition ($\circ$) \\
+  \cdx{id}      & $i\To i$      &       & identity function \\
+  \cdx{inj}     & $[i,i]\To i$  &       & injective function space\\
+  \cdx{surj}    & $[i,i]\To i$  &       & surjective function space\\
+  \cdx{bij}     & $[i,i]\To i$  &       & bijective function space
+\end{constants}
+
+\begin{alltt*}\isastyleminor
+\tdx{comp_def}: r O s     == {\ttlbrace}xz \isasymin domain(s)*range(r) . 
+                        {\isasymexists}x y z. xz=<x,z> & <x,y> \isasymin s & <y,z> \isasymin r{\ttrbrace}
+\tdx{id_def}:   id(A)     == (lam x \isasymin A. x)
+\tdx{inj_def}:  inj(A,B)  == {\ttlbrace} f\isasymin{}A->B. {\isasymforall}w\isasymin{}A. {\isasymforall}x\isasymin{}A. f`w=f`x --> w=x {\ttrbrace}
+\tdx{surj_def}: surj(A,B) == {\ttlbrace} f\isasymin{}A->B . {\isasymforall}y\isasymin{}B. {\isasymexists}x\isasymin{}A. f`x=y {\ttrbrace}
+\tdx{bij_def}:  bij(A,B)  == inj(A,B) \isasyminter surj(A,B)
+
+
+\tdx{left_inverse}:    [| f\isasymin{}inj(A,B);  a\isasymin{}A |] ==> converse(f)`(f`a) = a
+\tdx{right_inverse}:   [| f\isasymin{}inj(A,B);  b\isasymin{}range(f) |] ==> 
+                 f`(converse(f)`b) = b
+
+\tdx{inj_converse_inj}: f\isasymin{}inj(A,B) ==> converse(f) \isasymin inj(range(f),A)
+\tdx{bij_converse_bij}: f\isasymin{}bij(A,B) ==> converse(f) \isasymin bij(B,A)
+
+\tdx{comp_type}:     [| s \isasymsubseteq A*B;  r \isasymsubseteq B*C |] ==> (r O s) \isasymsubseteq A*C
+\tdx{comp_assoc}:    (r O s) O t = r O (s O t)
+
+\tdx{left_comp_id}:  r \isasymsubseteq A*B ==> id(B) O r = r
+\tdx{right_comp_id}: r \isasymsubseteq A*B ==> r O id(A) = r
+
+\tdx{comp_func}:     [| g\isasymin{}A->B; f\isasymin{}B->C |] ==> (f O g) \isasymin A->C
+\tdx{comp_func_apply}: [| g\isasymin{}A->B; f\isasymin{}B->C; a\isasymin{}A |] ==> (f O g)`a = f`(g`a)
+
+\tdx{comp_inj}:      [| g\isasymin{}inj(A,B);  f\isasymin{}inj(B,C)  |] ==> (f O g)\isasymin{}inj(A,C)
+\tdx{comp_surj}:     [| g\isasymin{}surj(A,B); f\isasymin{}surj(B,C) |] ==> (f O g)\isasymin{}surj(A,C)
+\tdx{comp_bij}:      [| g\isasymin{}bij(A,B); f\isasymin{}bij(B,C) |] ==> (f O g)\isasymin{}bij(A,C)
+
+\tdx{left_comp_inverse}:    f\isasymin{}inj(A,B) ==> converse(f) O f = id(A)
+\tdx{right_comp_inverse}:   f\isasymin{}surj(A,B) ==> f O converse(f) = id(B)
+
+\tdx{bij_disjoint_Un}:  
+    [| f\isasymin{}bij(A,B);  g\isasymin{}bij(C,D);  A \isasyminter C = 0;  B \isasyminter D = 0 |] ==> 
+    (f \isasymunion g)\isasymin{}bij(A \isasymunion C, B \isasymunion D)
+
+\tdx{restrict_bij}: [| f\isasymin{}inj(A,B); C\isasymsubseteq{}A |] ==> restrict(f,C)\isasymin{}bij(C, f``C)
+\end{alltt*}
+\caption{Permutations} \label{zf-perm}
+\end{figure}
+
+The theory \thydx{Perm} is concerned with permutations (bijections) and
+related concepts.  These include composition of relations, the identity
+relation, and three specialized function spaces: injective, surjective and
+bijective.  Figure~\ref{zf-perm} displays many of their properties that
+have been proved.  These results are fundamental to a treatment of
+equipollence and cardinality.
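+
+For example, using the rules above, the converse of a composition of
+bijections is again a bijection; a minimal sketch:
+\begin{ttbox}\isastyleminor
+lemma "[| f \isasymin bij(A,B);  g \isasymin bij(B,C) |] ==> converse(g O f) \isasymin bij(C,A)"
+by (blast intro: comp_bij bij_converse_bij)
+\end{ttbox}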
+
+Theory \thydx{Univ} defines a `universe' $\isa{univ}(A)$, which is used by
+the datatype package.  This set contains $A$ and the
+natural numbers.  Vitally, it is closed under finite products: 
+$\isa{univ}(A)\times\isa{univ}(A)\subseteq\isa{univ}(A)$.  This theory also
+defines the cumulative hierarchy of axiomatic set theory, which
+traditionally is written $V@\alpha$ for an ordinal~$\alpha$.  The
+`universe' is a simple generalization of~$V@\omega$.
+
+Theory \thydx{QUniv} defines a `universe' $\isa{quniv}(A)$, which is used by
+the datatype package to construct codatatypes such as streams.  It is
+analogous to $\isa{univ}(A)$ (and is defined in terms of it) but is closed
+under the non-standard product and sum.
+
+
+\section{Automatic Tools}
+
+ZF provides the simplifier and the classical reasoner.  Moreover it supplies a
+specialized tool to infer `types' of terms.
+
+\subsection{Simplification and Classical Reasoning}
+
+ZF inherits simplification from FOL but adapts it for set theory.  The
+extraction of rewrite rules takes the ZF primitives into account.  It can
+strip bounded universal quantifiers from a formula; for example, ${\forall
+  x\in A. f(x)=g(x)}$ yields the conditional rewrite rule $x\in A \Imp
+f(x)=g(x)$.  Given $a\in\{x\in A. P(x)\}$ it extracts rewrite rules from $a\in
+A$ and~$P(a)$.  It can also break down $a\in A\int B$ and $a\in A-B$.
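+
+For example, the bounded quantifier in the first assumption below becomes a
+conditional rewrite rule whose condition is discharged by the second
+assumption; a minimal sketch:
+\begin{ttbox}\isastyleminor
+lemma "[| {\isasymforall}x\isasymin{}A. f(x) = g(x);  a \isasymin A |] ==> f(a) = g(a)"
+by simp
+\end{ttbox}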
+
+The default simpset used by \isa{simp} contains congruence rules for all of ZF's
+binding operators.  It contains all the conversion rules, such as those
+for \isa{fst} and \isa{snd}, as well as the rewrites shown in
+Fig.\ts\ref{zf-simpdata}.
+
+Classical reasoner methods such as \isa{blast} and \isa{auto} refer to
+a rich collection of built-in rules for all the set-theoretic
+primitives.
+
+
+\begin{figure}
+\begin{eqnarray*}
+  a\in \emptyset        & \bimp &  \bot\\
+  a \in A \un B      & \bimp &  a\in A \disj a\in B\\
+  a \in A \int B      & \bimp &  a\in A \conj a\in B\\
+  a \in A-B             & \bimp &  a\in A \conj \lnot (a\in B)\\
+  \pair{a,b}\in \isa{Sigma}(A,B)
+                        & \bimp &  a\in A \conj b\in B(a)\\
+  a \in \isa{Collect}(A,P)      & \bimp &  a\in A \conj P(a)\\
+  (\forall x \in \emptyset. P(x)) & \bimp &  \top\\
+  (\forall x \in A. \top)       & \bimp &  \top
+\end{eqnarray*}
+\caption{Some rewrite rules for set theory} \label{zf-simpdata}
+\end{figure}
+
+
+\subsection{Type-Checking Tactics}
+\index{type-checking tactics}
+
+Isabelle/ZF provides simple tactics to help automate those proofs that are
+essentially type-checking.  Such proofs are built by applying rules such as
+these:
+\begin{ttbox}\isastyleminor
+[| ?P ==> ?a \isasymin ?A; ~?P ==> ?b \isasymin ?A |] 
+==> (if ?P then ?a else ?b) \isasymin ?A
+
+[| ?m \isasymin nat; ?n \isasymin nat |] ==> ?m #+ ?n \isasymin nat
+
+?a \isasymin ?A ==> Inl(?a) \isasymin ?A + ?B  
+\end{ttbox}
+In typical applications, the goal has the form $t\in\Var{A}$: in other words,
+we have a specific term~$t$ and need to infer its `type' by instantiating the
+set variable~$\Var{A}$.  Neither the simplifier nor the classical reasoner
+does this job well.  The if-then-else rule, and many similar ones, can make
+the classical reasoner loop.  The simplifier refuses (on principle) to
+instantiate variables during rewriting, so goals such as \isa{i\#+j \isasymin \ ?A}
+are left unsolved.
+
+The simplifier calls the type-checker to solve rewritten subgoals: this stage
+can indeed instantiate variables.  If you have defined new constants and
+proved type-checking rules for them, then declare the rules using
+the attribute \isa{TC} and the rest should be automatic.  In
+particular, the simplifier will use type-checking to help satisfy
+conditional rewrite rules. Call the method \ttindex{typecheck} to
+break down all subgoals using type-checking rules. You can add new
+type-checking rules temporarily like this:
+\begin{isabelle}
+\isacommand{apply}\ (typecheck add:\ inj_is_fun)
+\end{isabelle}
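+
+For instance, provided the rules displayed above are among the installed
+type-checking rules, goals of this shape are solved outright; an
+illustrative sketch:
+\begin{ttbox}\isastyleminor
+lemma "[| m \isasymin nat;  n \isasymin nat |] ==> succ(m) #+ n \isasymin nat"
+by typecheck
+\end{ttbox}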
+
+
+%Though the easiest way to invoke the type-checker is via the simplifier,
+%specialized applications may require more detailed knowledge of
+%the type-checking primitives.  They are modelled on the simplifier's:
+%\begin{ttdescription}
+%\item[\ttindexbold{tcset}] is the type of tcsets: sets of type-checking rules.
+%
+%\item[\ttindexbold{addTCs}] is an infix operator to add type-checking rules to
+%  a tcset.
+%  
+%\item[\ttindexbold{delTCs}] is an infix operator to remove type-checking rules
+%  from a tcset.
+%
+%\item[\ttindexbold{typecheck_tac}] is a tactic for attempting to prove all
+%  subgoals using the rules given in its argument, a tcset.
+%\end{ttdescription}
+%
+%Tcsets, like simpsets, are associated with theories and are merged when
+%theories are merged.  There are further primitives that use the default tcset.
+%\begin{ttdescription}
+%\item[\ttindexbold{tcset}] is a function to return the default tcset; use the
+%  expression \isa{tcset()}.
+%
+%\item[\ttindexbold{AddTCs}] adds type-checking rules to the default tcset.
+%  
+%\item[\ttindexbold{DelTCs}] removes type-checking rules from the default
+%  tcset.
+%
+%\item[\ttindexbold{Typecheck_tac}] calls \isa{typecheck_tac} using the
+%  default tcset.
+%\end{ttdescription}
+%
+%To supply some type-checking rules temporarily, using \isa{Addrules} and
+%later \isa{Delrules} is the simplest way.  There is also a high-tech
+%approach.  Call the simplifier with a new solver expressed using
+%\ttindexbold{type_solver_tac} and your temporary type-checking rules.
+%\begin{ttbox}\isastyleminor
+%by (asm_simp_tac 
+%     (simpset() setSolver type_solver_tac (tcset() addTCs prems)) 2);
+%\end{ttbox}
+
+
+\section{Natural number and integer arithmetic}
+
+\index{arithmetic|(}
+
+\begin{figure}\small
+\index{#*@{\tt\#*} symbol}
+\index{*div symbol}
+\index{*mod symbol}
+\index{#+@{\tt\#+} symbol}
+\index{#-@{\tt\#-} symbol}
+\begin{constants}
+  \it symbol  & \it meta-type & \it priority & \it description \\ 
+  \cdx{nat}     & $i$                   &       & set of natural numbers \\
+  \cdx{nat_case}& $[i,i\To i,i]\To i$     &     & conditional for $nat$\\
+  \tt \#*       & $[i,i]\To i$  &  Left 70      & multiplication \\
+  \tt div       & $[i,i]\To i$  &  Left 70      & division\\
+  \tt mod       & $[i,i]\To i$  &  Left 70      & modulus\\
+  \tt \#+       & $[i,i]\To i$  &  Left 65      & addition\\
+  \tt \#-       & $[i,i]\To i$  &  Left 65      & subtraction
+\end{constants}
+
+\begin{alltt*}\isastyleminor
+\tdx{nat_def}: nat == lfp(lam r \isasymin Pow(Inf). {\ttlbrace}0{\ttrbrace} \isasymunion {\ttlbrace}succ(x). x \isasymin r{\ttrbrace})
+
+\tdx{nat_case_def}:  nat_case(a,b,k) == 
+              THE y. k=0 & y=a | ({\isasymexists}x. k=succ(x) & y=b(x))
+
+\tdx{nat_0I}:           0 \isasymin nat
+\tdx{nat_succI}:        n \isasymin nat ==> succ(n) \isasymin nat
+
+\tdx{nat_induct}:        
+    [| n \isasymin nat;  P(0);  !!x. [| x \isasymin nat;  P(x) |] ==> P(succ(x)) 
+    |] ==> P(n)
+
+\tdx{nat_case_0}:       nat_case(a,b,0) = a
+\tdx{nat_case_succ}:    nat_case(a,b,succ(m)) = b(m)
+
+\tdx{add_0_natify}:     0 #+ n = natify(n)
+\tdx{add_succ}:         succ(m) #+ n = succ(m #+ n)
+
+\tdx{mult_type}:        m #* n \isasymin nat
+\tdx{mult_0}:           0 #* n = 0
+\tdx{mult_succ}:        succ(m) #* n = n #+ (m #* n)
+\tdx{mult_commute}:     m #* n = n #* m
+\tdx{add_mult_dist}:    (m #+ n) #* k = (m #* k) #+ (n #* k)
+\tdx{mult_assoc}:       (m #* n) #* k = m #* (n #* k)
+\tdx{mod_div_equality}: m \isasymin nat ==> (m div n)#*n #+ m mod n = m
+\end{alltt*}
+\caption{The natural numbers} \label{zf-nat}
+\end{figure}
+
+\index{natural numbers}
+
+Theory \thydx{Nat} defines the natural numbers and mathematical
+induction, along with a case analysis operator.  The set of natural
+numbers, here called \isa{nat}, is known in set theory as the ordinal~$\omega$.
+
+Theory \thydx{Arith} develops arithmetic on the natural numbers
+(Fig.\ts\ref{zf-nat}).  Addition, multiplication and subtraction are defined
+by primitive recursion.  Division and remainder are defined by repeated
+subtraction, which requires well-founded recursion; the termination argument
+relies on the divisor's being non-zero.  Many properties are proved:
+commutative, associative and distributive laws, identity and cancellation
+laws, etc.  The most interesting result is perhaps the theorem $a \bmod b +
+(a/b)\times b = a$.
+
+To minimize the need for tedious proofs of $t\in\isa{nat}$, the arithmetic
+operators coerce their arguments to be natural numbers.  The function
+\cdx{natify} is defined such that $\isa{natify}(n) = n$ if $n$ is a natural
+number, $\isa{natify}(\isa{succ}(x)) =
+\isa{succ}(\isa{natify}(x))$ for all $x$, and finally
+$\isa{natify}(x)=0$ in all other cases.  The benefit is that the addition,
+subtraction, multiplication, division and remainder operators always return
+natural numbers, regardless of their arguments.  Algebraic laws (commutative,
+associative, distributive) are unconditional.  Occurrences of \isa{natify}
+as operands of those operators are simplified away.  Any remaining occurrences
+can either be tolerated or else eliminated by proving that the argument is a
+natural number.
+
+The simplifier automatically cancels common terms on the opposite sides of
+subtraction and of relations ($=$, $<$ and $\le$).  Here is an example:
+\begin{isabelle}
+ 1. i \#+ j \#+ k \#- j < k \#+ l\isanewline
+\isacommand{apply}\ simp\isanewline
+ 1. natify(i) < natify(l)
+\end{isabelle}
+Given the assumptions \isa{i \isasymin nat} and \isa{l \isasymin nat}, both occurrences of
+\cdx{natify} would be simplified away.
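+
+For example, adding those typing assumptions, together with an ordering
+assumption to discharge the residual goal, should let \isa{simp} finish the
+whole proof.  The following is a sketch:
+\begin{isabelle}
+\isacommand{lemma}\ "[|\ i\ \isasymin \ nat;\ l\ \isasymin \ nat;\ i\ <\ l\ |]\ ==>\ i\ \#+\ j\ \#+\ k\ \#-\ j\ <\ k\ \#+\ l"\isanewline
+\ \ \isacommand{by}\ simp
+\end{isabelle}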
+
+
+\begin{figure}\small
+\index{$*@{\tt\$*} symbol}
+\index{$+@{\tt\$+} symbol}
+\index{$-@{\tt\$-} symbol}
+\begin{constants}
+  \it symbol  & \it meta-type & \it priority & \it description \\ 
+  \cdx{int}     & $i$                   &       & set of integers \\
+  \tt \$*       & $[i,i]\To i$  &  Left 70      & multiplication \\
+  \tt \$+       & $[i,i]\To i$  &  Left 65      & addition\\
+  \tt \$-       & $[i,i]\To i$  &  Left 65      & subtraction\\
+  \tt \$<       & $[i,i]\To o$  &  Left 50      & $<$ on integers\\
+  \tt \$<=      & $[i,i]\To o$  &  Left 50      & $\le$ on integers
+\end{constants}
+
+\begin{alltt*}\isastyleminor
+\tdx{zadd_0_intify}:    0 $+ n = intify(n)
+
+\tdx{zmult_type}:       m $* n \isasymin int
+\tdx{zmult_0}:          0 $* n = 0
+\tdx{zmult_commute}:    m $* n = n $* m
+\tdx{zadd_zmult_dist}:   (m $+ n) $* k = (m $* k) $+ (n $* k)
+\tdx{zmult_assoc}:      (m $* n) $* k = m $* (n $* k)
+\end{alltt*}
+\caption{The integers} \label{zf-int}
+\end{figure}
+
+
+\index{integers}
+
+Theory \thydx{Int} defines the integers, as equivalence classes of natural
+numbers.   Figure~\ref{zf-int} presents a tidy collection of laws.  In
+fact, a large library of facts is proved, including monotonicity laws for
+addition and multiplication, covering both positive and negative operands.  
+
+As with the natural numbers, the need for typing proofs is minimized.  All the
+operators defined in Fig.\ts\ref{zf-int} coerce their operands to integers by
+applying the function \cdx{intify}.  This function is the identity on integers
+and maps other operands to zero.
+
+Decimal notation is provided for the integers.  Numbers, written as
+\isa{\#$nnn$} or \isa{\#-$nnn$}, are represented internally in
+two's-complement binary.  Expressions involving addition, subtraction and
+multiplication of numeral constants are evaluated (with acceptable efficiency)
+by simplification.  The simplifier also collects similar terms, multiplying
+them by a numerical coefficient.  It also cancels occurrences of the same
+terms on opposite sides of the relational operators.  Example:
+\begin{isabelle}
+ 1. y \$+ z \$+ \#-3 \$* x \$+ y \$<=  x \$* \#2 \$+
+z\isanewline
+\isacommand{apply}\ simp\isanewline
+ 1. \#2 \$* y \$<= \#5 \$* x
+\end{isabelle}
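+
+Evaluation of numeral expressions is equally direct; a goal such as the
+following should be provable by simplification alone (a sketch):
+\begin{isabelle}
+\isacommand{lemma}\ "\#3\ \$*\ \#4\ \$+\ \#1\ =\ \#13"\isanewline
+\ \ \isacommand{by}\ simp
+\end{isabelle}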
+For more information on the integers, please see the theories on directory
+\texttt{ZF/Integ}. 
+
+\index{arithmetic|)}
+
+
+\section{Datatype definitions}
+\label{sec:ZF:datatype}
+\index{*datatype|(}
+
+The \ttindex{datatype} definition package of ZF constructs inductive datatypes
+similar to \ML's.  It can also construct coinductive datatypes
+(codatatypes), which are non-well-founded structures such as streams.  It
+defines the set using a fixed-point construction and proves induction rules,
+as well as theorems for recursion and case combinators.  It supplies
+mechanisms for reasoning about freeness.  The datatype package can handle both
+mutual and indirect recursion.
+
+
+\subsection{Basics}
+\label{subsec:datatype:basics}
+
+A \isa{datatype} definition has the following form:
+\[
+\begin{array}{llcl}
+\mathtt{datatype} & t@1(A@1,\ldots,A@h) & = &
+  constructor^1@1 ~\mid~ \ldots ~\mid~ constructor^1@{k@1} \\
+ & & \vdots \\
+\mathtt{and} & t@n(A@1,\ldots,A@h) & = &
+  constructor^n@1 ~\mid~ \ldots ~\mid~ constructor^n@{k@n}
+\end{array}
+\]
+Here $t@1$, \ldots,~$t@n$ are identifiers and $A@1$, \ldots,~$A@h$ are
+variables: the datatype's parameters.  Each constructor specification has the
+form \dquotesoff
+\[ C \hbox{\tt~( } \hbox{\tt"} x@1 \hbox{\tt:} T@1 \hbox{\tt"},\;
+                   \ldots,\;
+                   \hbox{\tt"} x@m \hbox{\tt:} T@m \hbox{\tt"}
+     \hbox{\tt~)}
+\]
+Here $C$ is the constructor name, and variables $x@1$, \ldots,~$x@m$ are the
+constructor arguments, belonging to the sets $T@1$, \ldots, $T@m$,
+respectively.  Typically each $T@j$ is either a constant set, a datatype
+parameter (one of $A@1$, \ldots, $A@h$) or a recursive occurrence of one of
+the datatypes, say $t@i(A@1,\ldots,A@h)$.  More complex possibilities exist,
+but they are much harder to realize.  Often, additional information must be
+supplied in the form of theorems.
+
+A datatype can occur recursively as the argument of some function~$F$.  This
+is called a {\em nested} (or \emph{indirect}) occurrence.  It is only allowed
+if the datatype package is given a theorem asserting that $F$ is monotonic.
+If the datatype has indirect occurrences, then Isabelle/ZF does not support
+recursive function definitions.
+
+A simple example of a datatype is \isa{list}, which is built-in, and is
+defined by
+\begin{alltt*}\isastyleminor
+consts     list :: "i=>i"
+datatype  "list(A)" = Nil | Cons ("a \isasymin A", "l \isasymin list(A)")
+\end{alltt*}
+Note that the datatype operator must be declared as a constant first.
+However, the package declares the constructors.  Here, \isa{Nil} gets type
+$i$ and \isa{Cons} gets type $[i,i]\To i$.
+
+Trees and forests can be modelled by the mutually recursive datatype
+definition
+\begin{alltt*}\isastyleminor
+consts   
+  tree :: "i=>i"
+  forest :: "i=>i"
+  tree_forest :: "i=>i"
+datatype  "tree(A)"   = Tcons ("a{\isasymin}A",  "f{\isasymin}forest(A)")
+and "forest(A)" = Fnil | Fcons ("t{\isasymin}tree(A)",  "f{\isasymin}forest(A)")
+\end{alltt*}
+Here $\isa{tree}(A)$ is the set of trees over $A$, $\isa{forest}(A)$ is
+the set of forests over $A$, and  $\isa{tree_forest}(A)$ is the union of
+the previous two sets.  All three operators must be declared first.
+
+The datatype \isa{term}, which is defined by
+\begin{alltt*}\isastyleminor
+consts     term :: "i=>i"
+datatype  "term(A)" = Apply ("a \isasymin A", "l \isasymin list(term(A))")
+  monos list_mono
+  type_elims list_univ [THEN subsetD, elim_format]
+\end{alltt*}
+is an example of nested recursion.  (The theorem \isa{list_mono} is proved
+in theory \isa{List}, and the \isa{term} example is developed in
+theory
+\thydx{Induct/Term}.)
+
+\subsubsection{Freeness of the constructors}
+
+Constructors satisfy {\em freeness} properties.  Constructions are distinct,
+for example $\isa{Nil}\not=\isa{Cons}(a,l)$, and they are injective, for
+example $\isa{Cons}(a,l)=\isa{Cons}(a',l') \bimp a=a' \conj l=l'$.
+Because the number of freeness properties is quadratic in the number of constructors, the
+datatype package does not prove them.  Instead, it ensures that simplification
+will prove them dynamically: when the simplifier encounters a formula
+asserting the equality of two datatype constructors, it performs freeness
+reasoning.  
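+
+For instance, a typical freeness fact about lists should be proved
+automatically in this way (a sketch):
+\begin{isabelle}
+\isacommand{lemma}\ "Cons(a,\ l)\ \isasymnoteq \ Nil"\isanewline
+\ \ \isacommand{by}\ simp
+\end{isabelle}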
+
+Freeness reasoning can also be done using the classical reasoner, but it is
+more complicated.  You have to add some safe elimination rules to the
+claset.  For the \isa{list} datatype, they are called
+\isa{list.free_elims}.  Occasionally this exposes the underlying
+representation of some constructor, which can be rectified using the command
+\isa{unfold list.con_defs [symmetric]}.
+
+
+\subsubsection{Structural induction}
+
+The datatype package also provides structural induction rules.  For datatypes
+without mutual or nested recursion, the rule has the form exemplified by
+\isa{list.induct} in Fig.\ts\ref{zf-list}.  For mutually recursive
+datatypes, the induction rule is supplied in two forms.  Consider datatype
+\isa{TF}.  The rule \isa{tree_forest.induct} performs induction over a
+single predicate~\isa{P}, which is presumed to be defined for both trees
+and forests:
+\begin{alltt*}\isastyleminor
+[| x \isasymin tree_forest(A);
+   !!a f. [| a \isasymin A; f \isasymin forest(A); P(f) |] ==> P(Tcons(a, f)); 
+   P(Fnil);
+   !!f t. [| t \isasymin tree(A); P(t); f \isasymin forest(A); P(f) |]
+          ==> P(Fcons(t, f)) 
+|] ==> P(x)
+\end{alltt*}
+The rule \isa{tree_forest.mutual_induct} performs induction over two
+distinct predicates, \isa{P_tree} and \isa{P_forest}.
+\begin{alltt*}\isastyleminor
+[| !!a f.
+      [| a{\isasymin}A; f{\isasymin}forest(A); P_forest(f) |] ==> P_tree(Tcons(a,f));
+   P_forest(Fnil);
+   !!f t. [| t{\isasymin}tree(A); P_tree(t); f{\isasymin}forest(A); P_forest(f) |]
+          ==> P_forest(Fcons(t, f)) 
+|] ==> ({\isasymforall}za. za \isasymin tree(A) --> P_tree(za)) &
+    ({\isasymforall}za. za \isasymin forest(A) --> P_forest(za))
+\end{alltt*}
+
+For datatypes with nested recursion, such as the \isa{term} example from
+above, things are a bit more complicated.  The rule \isa{term.induct}
+refers to the monotonic operator, \isa{list}:
+\begin{alltt*}\isastyleminor
+[| x \isasymin term(A);
+   !!a l. [| a\isasymin{}A; l\isasymin{}list(Collect(term(A), P)) |] ==> P(Apply(a,l)) 
+|] ==> P(x)
+\end{alltt*}
+The theory \isa{Induct/Term.thy} derives two higher-level induction rules,
+one of which is particularly useful for proving equations:
+\begin{alltt*}\isastyleminor
+[| t \isasymin term(A);
+   !!x zs. [| x \isasymin A; zs \isasymin list(term(A)); map(f, zs) = map(g, zs) |]
+           ==> f(Apply(x, zs)) = g(Apply(x, zs)) 
+|] ==> f(t) = g(t)  
+\end{alltt*}
+How this can be generalized to other nested datatypes is a matter for future
+research.
+
+
+\subsubsection{The \isa{case} operator}
+
+The package defines an operator for performing case analysis over the
+datatype.  For \isa{list}, it is called \isa{list_case} and satisfies
+the equations
+\begin{ttbox}\isastyleminor
+list_case(f_Nil, f_Cons, []) = f_Nil
+list_case(f_Nil, f_Cons, Cons(a, l)) = f_Cons(a, l)
+\end{ttbox}
+Here \isa{f_Nil} is the value to return if the argument is \isa{Nil} and
+\isa{f_Cons} is a function that computes the value to return if the
+argument has the form $\isa{Cons}(a,l)$.  The function can be expressed as
+an abstraction, over patterns if desired (\S\ref{sec:pairs}).
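+
+For instance, taking \isa{f_Nil} to be \isa{0} and \isa{f_Cons} to be a
+function that ignores its arguments and returns \isa{1}, the second equation
+should yield the following by simplification (a sketch):
+\begin{isabelle}
+\isacommand{lemma}\ "list\_case(0,\ \isasymlambda a\ l.\ 1,\ Cons(a,\ Nil))\ =\ 1"\isanewline
+\ \ \isacommand{by}\ simp
+\end{isabelle}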
+
+For mutually recursive datatypes, there is a single \isa{case} operator.
+In the tree/forest example, the constant \isa{tree_forest_case} handles all
+of the constructors of the two datatypes.
+
+
+\subsection{Defining datatypes}
+
+The theory syntax for datatype definitions is shown in the
+Isabelle/Isar reference manual.  In order to be well-formed, a
+datatype definition has to obey the rules stated in the previous
+section.  As a result the theory is extended with the new types, the
+constructors, and the theorems listed in the previous section.
+
+Codatatypes are declared like datatypes and are identical to them in every
+respect except that they have a coinduction rule instead of an induction rule.
+Note that while an induction rule has the effect of limiting the values
+contained in the set, a coinduction rule gives a way of constructing new
+values of the set.
+
+Most of the theorems about datatypes become part of the default simpset.  You
+never need to see them again because the simplifier applies them
+automatically.  
+
+\subsubsection{Specialized methods for datatypes}
+
+Induction and case-analysis can be invoked using these special-purpose
+methods:
+\begin{ttdescription}
+\item[\methdx{induct_tac} $x$] applies structural
+  induction on variable $x$ to subgoal~1, provided the type of $x$ is a
+  datatype.  The induction variable should not occur among other assumptions
+  of the subgoal.
+\end{ttdescription}
+% 
+% we also have the ind_cases method, but what does it do?
+In some situations, induction is overkill and a case distinction over all
+constructors of the datatype suffices.
+\begin{ttdescription}
+\item[\methdx{case_tac} $x$]
+ performs a case analysis for the variable~$x$.
+\end{ttdescription}
+
+Both tactics can only be applied to a variable, whose typing must be given in
+some assumption, for example the assumption \isa{x \isasymin \ list(A)}.  The tactics
+also work for the natural numbers (\isa{nat}) and disjoint sums, although
+these sets were not defined using the datatype package.  (Disjoint sums are
+not recursive, so only \isa{case_tac} is available.)
+
+Structured Isar methods are also available. Below, $t$ 
+stands for the name of the datatype.
+\begin{ttdescription}
+\item[\methdx{induct} \isa{set:}\ $t$] is the Isar induction tactic.
+\item[\methdx{cases} \isa{set:}\ $t$] is the Isar case-analysis tactic.
+\end{ttdescription}
+
+
+\subsubsection{The theorems proved by a datatype declaration}
+
+Here are some more details for the technically minded.  Processing the
+datatype declaration of a set~$t$ produces a name space~$t$ containing
+the following theorems:
+\begin{ttbox}\isastyleminor
+intros          \textrm{the introduction rules}
+cases           \textrm{the case analysis rule}
+induct          \textrm{the standard induction rule}
+mutual_induct   \textrm{the mutual induction rule, if needed}
+case_eqns       \textrm{equations for the case operator}
+recursor_eqns   \textrm{equations for the recursor}
+simps           \textrm{the union of} case_eqns \textrm{and} recursor_eqns
+con_defs        \textrm{definitions of the case operator and constructors}
+free_iffs       \textrm{logical equivalences for proving freeness}
+free_elims      \textrm{elimination rules for proving freeness}
+defs            \textrm{datatype definition(s)}
+\end{ttbox}
+Furthermore there is the theorem $C$ for every constructor~$C$; for
+example, the \isa{list} datatype's introduction rules are bound to the
+identifiers \isa{Nil} and \isa{Cons}.
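+
+For example, the introduction rules can be supplied collectively to
+\isa{blast} (a sketch):
+\begin{isabelle}
+\isacommand{lemma}\ "a\ \isasymin \ A\ ==>\ Cons(a,\ Nil)\ \isasymin \ list(A)"\isanewline
+\ \ \isacommand{by}\ (blast\ intro:\ list.intros)
+\end{isabelle}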
+
+For a codatatype, the component \isa{coinduct} is the coinduction rule,
+replacing the \isa{induct} component.
+
+See the theories \isa{Induct/Ntree} and \isa{Induct/Brouwer} for examples of
+infinitely branching datatypes.  See theory \isa{Induct/LList} for an example
+of a codatatype.  Some of these theories illustrate the use of additional,
+undocumented features of the datatype package.  Datatype definitions are
+reduced to inductive definitions, and the advanced features should be
+understood in that light.
+
+
+\subsection{Examples}
+
+\subsubsection{The datatype of binary trees}
+
+Let us define the set $\isa{bt}(A)$ of binary trees over~$A$.  The theory
+must contain these lines:
+\begin{alltt*}\isastyleminor
+consts   bt :: "i=>i"
+datatype "bt(A)" = Lf | Br ("a\isasymin{}A", "t1\isasymin{}bt(A)", "t2\isasymin{}bt(A)")
+\end{alltt*}
+After loading the theory, we can prove some theorems.  
+We begin by declaring the constructor's typechecking rules
+as simplification rules:
+\begin{isabelle}
+\isacommand{declare}\ bt.intros\ [simp]%
+\end{isabelle}
+
+Our first example is the theorem that no tree equals its
+left branch.  To make the inductive hypothesis strong enough, 
+the proof requires a quantified induction formula, but 
+the \isa{rule\_format} attribute will remove the quantifiers 
+before the theorem is stored.
+\begin{isabelle}
+\isacommand{lemma}\ Br\_neq\_left\ [rule\_format]:\ "l\isasymin bt(A)\ ==>\ \isasymforall x\ r.\ Br(x,l,r)\isasymnoteq{}l"\isanewline
+\ 1.\ l\ \isasymin \ bt(A)\ \isasymLongrightarrow \ \isasymforall x\ r.\ Br(x,\ l,\ r)\ \isasymnoteq \ l%
+\end{isabelle}
+This can be proved by the structural induction tactic:
+\begin{isabelle}
+\ \ \isacommand{apply}\ (induct\_tac\ l)\isanewline
+\ 1.\ \isasymforall x\ r.\ Br(x,\ Lf,\ r)\ \isasymnoteq \ Lf\isanewline
+\ 2.\ \isasymAnd a\ t1\ t2.\isanewline
+\isaindent{\ 2.\ \ \ \ }\isasymlbrakk a\ \isasymin \ A;\ t1\ \isasymin \ bt(A);\ \isasymforall x\ r.\ Br(x,\ t1,\ r)\ \isasymnoteq \ t1;\ t2\ \isasymin \ bt(A);\isanewline
+\isaindent{\ 2.\ \ \ \ \ \ \ }\isasymforall x\ r.\ Br(x,\ t2,\ r)\ \isasymnoteq \ t2\isasymrbrakk \isanewline
+\isaindent{\ 2.\ \ \ \ }\isasymLongrightarrow \ \isasymforall x\ r.\ Br(x,\ Br(a,\ t1,\ t2),\ r)\ \isasymnoteq \ Br(a,\ t1,\ t2)
+\end{isabelle}
+Both subgoals are proved using \isa{auto}, which performs the necessary
+freeness reasoning. 
+\begin{isabelle}
+\ \ \isacommand{apply}\ auto\isanewline
+No\ subgoals!\isanewline
+\isacommand{done}
+\end{isabelle}
+
+An alternative proof uses Isar's fancy \isa{induct} method, which 
+automatically quantifies over all free variables:
+
+\begin{isabelle}
+\isacommand{lemma}\ Br\_neq\_left':\ "l\ \isasymin \ bt(A)\ ==>\ (!!x\ r.\ Br(x,\ l,\ r)\ \isasymnoteq \ l)"\isanewline
+\ \ \isacommand{apply}\ (induct\ set:\ bt)\isanewline
+\ 1.\ \isasymAnd x\ r.\ Br(x,\ Lf,\ r)\ \isasymnoteq \ Lf\isanewline
+\ 2.\ \isasymAnd a\ t1\ t2\ x\ r.\isanewline
+\isaindent{\ 2.\ \ \ \ }\isasymlbrakk a\ \isasymin \ A;\ t1\ \isasymin \ bt(A);\ \isasymAnd x\ r.\ Br(x,\ t1,\ r)\ \isasymnoteq \ t1;\ t2\ \isasymin \ bt(A);\isanewline
+\isaindent{\ 2.\ \ \ \ \ \ \ }\isasymAnd x\ r.\ Br(x,\ t2,\ r)\ \isasymnoteq \ t2\isasymrbrakk \isanewline
+\isaindent{\ 2.\ \ \ \ }\isasymLongrightarrow \ Br(x,\ Br(a,\ t1,\ t2),\ r)\ \isasymnoteq \ Br(a,\ t1,\ t2)
+\end{isabelle}
+Compare the form of the induction hypotheses with the corresponding ones in
+the previous proof. As before, to conclude requires only \isa{auto}.
+
+When there are only a few constructors, we might prefer to prove the freeness
+theorems for each constructor.  This is simple:
+\begin{isabelle}
+\isacommand{lemma}\ Br\_iff:\ "Br(a,l,r)\ =\ Br(a',l',r')\ <->\ a=a'\ \&\ l=l'\ \&\ r=r'"\isanewline
+\ \ \isacommand{by}\ (blast\ elim!:\ bt.free\_elims)
+\end{isabelle}
+Here we see a demonstration of freeness reasoning using
+\isa{bt.free\_elims}, but simpler still is just to apply \isa{auto}.
+
+An \ttindex{inductive\_cases} declaration generates instances of the
+case analysis rule that have been simplified  using freeness
+reasoning. 
+\begin{isabelle}
+\isacommand{inductive\_cases}\ Br\_in\_bt:\ "Br(a,\ l,\ r)\ \isasymin \ bt(A)"
+\end{isabelle}
+The theorem just created is 
+\begin{isabelle}
+\isasymlbrakk Br(a,\ l,\ r)\ \isasymin \ bt(A);\ \isasymlbrakk a\ \isasymin \ A;\ l\ \isasymin \ bt(A);\ r\ \isasymin \ bt(A)\isasymrbrakk \ \isasymLongrightarrow \ Q\isasymrbrakk \ \isasymLongrightarrow \ Q.
+\end{isabelle}
+It is an elimination rule that from $\isa{Br}(a,l,r)\in\isa{bt}(A)$
+lets us infer $a\in A$, $l\in\isa{bt}(A)$ and
+$r\in\isa{bt}(A)$.
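+
+Such a rule is typically applied with \isa{erule}; the following sketch
+extracts the typing of the left subtree:
+\begin{isabelle}
+\isacommand{lemma}\ "Br(a,\ l,\ r)\ \isasymin \ bt(A)\ ==>\ l\ \isasymin \ bt(A)"\isanewline
+\ \ \isacommand{by}\ (erule\ Br\_in\_bt,\ assumption)
+\end{isabelle}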
+
+
+\subsubsection{Mixfix syntax in datatypes}
+
+Mixfix syntax is sometimes convenient.  The theory \isa{Induct/PropLog} makes a
+deep embedding of propositional logic:
+\begin{alltt*}\isastyleminor
+consts     prop :: i
+datatype  "prop" = Fls
+                 | Var ("n \isasymin nat")                ("#_" [100] 100)
+                 | "=>" ("p \isasymin prop", "q \isasymin prop")   (infixr 90)
+\end{alltt*}
+The second constructor has a special $\#n$ syntax, while the third constructor
+is an infixed arrow.
+
+
+\subsubsection{A giant enumeration type}
+
+This example shows a datatype that consists of 60 constructors:
+\begin{alltt*}\isastyleminor
+consts  enum :: i
+datatype
+  "enum" = C00 | C01 | C02 | C03 | C04 | C05 | C06 | C07 | C08 | C09
+         | C10 | C11 | C12 | C13 | C14 | C15 | C16 | C17 | C18 | C19
+         | C20 | C21 | C22 | C23 | C24 | C25 | C26 | C27 | C28 | C29
+         | C30 | C31 | C32 | C33 | C34 | C35 | C36 | C37 | C38 | C39
+         | C40 | C41 | C42 | C43 | C44 | C45 | C46 | C47 | C48 | C49
+         | C50 | C51 | C52 | C53 | C54 | C55 | C56 | C57 | C58 | C59
+end
+\end{alltt*}
+The datatype package scales well.  Even though all properties are proved
+rather than assumed, full processing of this definition takes around two seconds
+(on a 1.8GHz machine).  The constructors have a balanced representation,
+related to binary notation, so freeness properties can be proved fast.
+\begin{isabelle}
+\isacommand{lemma}\ "C00 \isasymnoteq\ C01"\isanewline
+\ \ \isacommand{by}\ simp
+\end{isabelle}
+You need not derive such inequalities explicitly.  The simplifier will
+dispose of them automatically.
+
+\index{*datatype|)}
+
+
+\subsection{Recursive function definitions}\label{sec:ZF:recursive}
+\index{recursive functions|see{recursion}}
+\index{*primrec|(}
+\index{recursion!primitive|(}
+
+Datatypes come with a uniform way of defining functions, {\bf primitive
+  recursion}.  Such definitions rely on the recursion operator defined by the
+datatype package.  Isabelle proves the desired recursion equations as
+theorems.
+
+In principle, one could introduce primitive recursive functions by asserting
+their reduction rules as axioms.  Here is a dangerous way of defining a
+recursive function over binary trees:
+\begin{isabelle}
+\isacommand{consts}\ \ n\_nodes\ ::\ "i\ =>\ i"\isanewline
+\isacommand{axioms}\isanewline
+\ \ n\_nodes\_Lf:\ "n\_nodes(Lf)\ =\ 0"\isanewline
+\ \ n\_nodes\_Br:\ "n\_nodes(Br(a,l,r))\ =\ succ(n\_nodes(l)\ \#+\ n\_nodes(r))"
+\end{isabelle}
+Asserting axioms brings the danger of accidentally introducing
+contradictions.  It should be avoided whenever possible.
+
+The \ttindex{primrec} declaration is a safe means of defining primitive
+recursive functions on datatypes:
+\begin{isabelle}
+\isacommand{consts}\ \ n\_nodes\ ::\ "i\ =>\ i"\isanewline
+\isacommand{primrec}\isanewline
+\ \ "n\_nodes(Lf)\ =\ 0"\isanewline
+\ \ "n\_nodes(Br(a,\ l,\ r))\ =\ succ(n\_nodes(l)\ \#+\ n\_nodes(r))"
+\end{isabelle}
+Isabelle will now derive the two equations from a low-level definition  
+based upon well-founded recursion.  If they do not define a legitimate
+recursion, then Isabelle will reject the declaration.
+
+
+\subsubsection{Syntax of recursive definitions}
+
+The general form of a primitive recursive definition is
+\begin{ttbox}\isastyleminor
+primrec
+    {\it reduction rules}
+\end{ttbox}
+where \textit{reduction rules} specify one or more equations of the form
+\[ f \, x@1 \, \dots \, x@m \, (C \, y@1 \, \dots \, y@k) \, z@1 \,
+\dots \, z@n = r \] such that $C$ is a constructor of the datatype, $r$
+contains only the free variables on the left-hand side, and all recursive
+calls in $r$ are of the form $f \, \dots \, y@i \, \dots$ for some $i$.  
+There must be at most one reduction rule for each constructor.  The order is
+immaterial.  For missing constructors, the function is defined to return zero.
+
+All reduction rules are added to the default simpset.
+If you would like to refer to some rule by name, then you must prefix
+the rule with an identifier.  These identifiers, like those in the
+\isa{rules} section of a theory, will be visible in proof scripts.
+
+The reduction rules become part of the default simpset, which
+leads to short proof scripts:
+\begin{isabelle}
+\isacommand{lemma}\ n\_nodes\_type\ [simp]:\ "t\ \isasymin \ bt(A)\ ==>\ n\_nodes(t)\ \isasymin \ nat"\isanewline
+\ \ \isacommand{by}\ (induct\_tac\ t,\ auto)
+\end{isabelle}
+
+You can even use the \isa{primrec} form with non-recursive datatypes and
+with codatatypes.  Recursion is not allowed, but it provides a convenient
+syntax for defining functions by cases.
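+
+For instance, over a small non-recursive datatype (the declarations of
+\isa{col} and \isa{swap} below are hypothetical), \isa{primrec} amounts to a
+definition by cases; as noted above, the missing \isa{Blue} case defaults to
+zero:
+\begin{alltt*}\isastyleminor
+consts     col :: i
+datatype  "col" = Red | Green | Blue
+
+consts     swap :: "i=>i"
+primrec
+  "swap(Red) = Green"
+  "swap(Green) = Red"
+\end{alltt*}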
+
+
+\subsubsection{Example: varying arguments}
+
+All arguments, other than the recursive one, must be the same in each equation
+and in each recursive call.  To get around this restriction, use explicit
+$\lambda$-abstraction and function application.  For example, let us
+define the tail-recursive version of \isa{n\_nodes}, using an 
+accumulating argument for the counter.  The second argument, $k$, varies in
+recursive calls.
+\begin{isabelle}
+\isacommand{consts}\ \ n\_nodes\_aux\ ::\ "i\ =>\ i"\isanewline
+\isacommand{primrec}\isanewline
+\ \ "n\_nodes\_aux(Lf)\ =\ (\isasymlambda k\ \isasymin \ nat.\ k)"\isanewline
+\ \ "n\_nodes\_aux(Br(a,l,r))\ =\ \isanewline
+\ \ \ \ \ \ (\isasymlambda k\ \isasymin \ nat.\ n\_nodes\_aux(r)\ `\ \ (n\_nodes\_aux(l)\ `\ succ(k)))"
+\end{isabelle}
+Now \isa{n\_nodes\_aux(t)\ `\ k} is our function in two arguments. We
+can prove a theorem relating it to \isa{n\_nodes}. Note the quantification
+over \isa{k\ \isasymin \ nat}:
+\begin{isabelle}
+\isacommand{lemma}\ n\_nodes\_aux\_eq\ [rule\_format]:\isanewline
+\ \ \ \ \ "t\ \isasymin \ bt(A)\ ==>\ \isasymforall k\ \isasymin \ nat.\ n\_nodes\_aux(t)`k\ =\ n\_nodes(t)\ \#+\ k"\isanewline
+\ \ \isacommand{by}\ (induct\_tac\ t,\ simp\_all)
+\end{isabelle}
+
+Now, we can use \isa{n\_nodes\_aux} to define a tail-recursive version
+of \isa{n\_nodes}:
+\begin{isabelle}
+\isacommand{constdefs}\isanewline
+\ \ n\_nodes\_tail\ ::\ "i\ =>\ i"\isanewline
+\ \ \ "n\_nodes\_tail(t)\ ==\ n\_nodes\_aux(t)\ `\ 0"
+\end{isabelle}
+It is easy to
+prove that \isa{n\_nodes\_tail} is equivalent to \isa{n\_nodes}:
+\begin{isabelle}
+\isacommand{lemma}\ "t\ \isasymin \ bt(A)\ ==>\ n\_nodes\_tail(t)\ =\ n\_nodes(t)"\isanewline
+\ \isacommand{by}\ (simp\ add:\ n\_nodes\_tail\_def\ n\_nodes\_aux\_eq)
+\end{isabelle}
+
+
+
+
+\index{recursion!primitive|)}
+\index{*primrec|)}
+
+
+\section{Inductive and coinductive definitions}
+\index{*inductive|(}
+\index{*coinductive|(}
+
+An {\bf inductive definition} specifies the least set~$R$ closed under given
+rules.  (Applying a rule to elements of~$R$ yields a result within~$R$.)  For
+example, a structural operational semantics is an inductive definition of an
+evaluation relation.  Dually, a {\bf coinductive definition} specifies the
+greatest set~$R$ consistent with given rules.  (Every element of~$R$ can be
+seen as arising by applying a rule to elements of~$R$.)  An important example
+is using bisimulation relations to formalise equivalence of processes and
+infinite data structures.
+
+A theory file may contain any number of inductive and coinductive
+definitions.  They may be intermixed with other declarations; in
+particular, the (co)inductive sets {\bf must} be declared separately as
+constants, and may have mixfix syntax or be subject to syntax translations.
+
+Each (co)inductive definition adds definitions to the theory and also
+proves some theorems.  A coinductive definition behaves identically to the analogous
+inductive definition except that instead of an induction rule there is
+a coinduction rule.  Its treatment of coinduction is described in
+detail in a separate paper,%
+\footnote{It appeared in CADE~\cite{paulson-CADE}; a longer version is
+  distributed with Isabelle as \emph{A Fixedpoint Approach to 
+ (Co)Inductive and (Co)Datatype Definitions}.}  %
+which you might refer to for background information.
+
+
+\subsection{The syntax of a (co)inductive definition}
+An inductive definition has the form
+\begin{ttbox}\isastyleminor
+inductive
+  domains     {\it domain declarations}
+  intros      {\it introduction rules}
+  monos       {\it monotonicity theorems}
+  con_defs    {\it constructor definitions}
+  type_intros {\it introduction rules for type-checking}
+  type_elims  {\it elimination rules for type-checking}
+\end{ttbox}
+A coinductive definition is identical, but starts with the keyword
+\isa{co\-inductive}.  
+
+The \isa{monos}, \isa{con\_defs}, \isa{type\_intros} and \isa{type\_elims}
+sections are optional.  If present, each is specified as a list of
+theorems, which may contain Isar attributes as usual.
+
+\begin{description}
+\item[\it domain declarations] are items of the form
+  {\it string\/}~\isa{\isasymsubseteq }~{\it string}, associating each recursive set with
+  its domain.  (The domain is some existing set that is large enough to
+  hold the new set being defined.)
+
+\item[\it introduction rules] specify one or more introduction rules in
+  the form {\it ident\/}~{\it string}, where the identifier gives the name of
+  the rule in the result structure.
+
+\item[\it monotonicity theorems] are required for each operator applied to
+  a recursive set in the introduction rules.  There \textbf{must} be a theorem
+  of the form $A\subseteq B\Imp M(A)\subseteq M(B)$, for each premise $t\in M(R_i)$
+  in an introduction rule!
+
+\item[\it constructor definitions] contain definitions of constants
+  appearing in the introduction rules.  The (co)datatype package supplies
+  the constructors' definitions here.  Most (co)inductive definitions omit
+  this section; one exception is the primitive recursive functions example;
+  see theory \isa{Induct/Primrec}.
+  
+\item[\it type\_intros] consists of introduction rules for type-checking the
+  definition: for demonstrating that the new set is included in its domain.
+  (The proof uses depth-first search.)
+
+\item[\it type\_elims] consists of elimination rules for type-checking the
+  definition.  They are presumed to be safe and are applied as often as
+  possible prior to the \isa{type\_intros} search.
+\end{description}
+
+The package has a few restrictions:
+\begin{itemize}
+\item The theory must separately declare the recursive sets as
+  constants.
+
+\item The names of the recursive sets must be identifiers, not infix
+operators.  
+
+\item Side-conditions must not be conjunctions.  However, an introduction rule
+may contain any number of side-conditions.
+
+\item Side-conditions of the form $x=t$, where the variable~$x$ does not
+  occur in~$t$, will be substituted through the rule \isa{mutual\_induct}.
+\end{itemize}
+
+
+\subsection{Example of an inductive definition}
+
+Below, we shall see how Isabelle/ZF defines the finite powerset
+operator.  The first step is to declare the constant~\isa{Fin}.  Then we
+must declare it inductively, with two introduction rules:
+\begin{isabelle}
+\isacommand{consts}\ \ Fin\ ::\ "i=>i"\isanewline
+\isacommand{inductive}\isanewline
+\ \ \isakeyword{domains}\ \ \ "Fin(A)"\ \isasymsubseteq\ "Pow(A)"\isanewline
+\ \ \isakeyword{intros}\isanewline
+\ \ \ \ emptyI:\ \ "0\ \isasymin\ Fin(A)"\isanewline
+\ \ \ \ consI:\ \ \ "[|\ a\ \isasymin\ A;\ \ b\ \isasymin\ Fin(A)\ |]\ ==>\ cons(a,b)\ \isasymin\ Fin(A)"\isanewline
+\ \ \isakeyword{type\_intros}\ \ empty\_subsetI\ cons\_subsetI\ PowI\isanewline
+\ \ \isakeyword{type\_elims}\ \ \ PowD\ [THEN\ revcut\_rl]\end{isabelle}
+The resulting theory contains a name space, called~\isa{Fin}.
+The \isa{Fin}$~A$ introduction rules can be referred to collectively as
+\isa{Fin.intros}, and also individually as \isa{Fin.emptyI} and
+\isa{Fin.consI}.  The induction rule is \isa{Fin.induct}.
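+
+These rules can be used directly; for example (a sketch):
+\begin{isabelle}
+\isacommand{lemma}\ "a\ \isasymin \ A\ ==>\ cons(a,\ 0)\ \isasymin \ Fin(A)"\isanewline
+\ \ \isacommand{by}\ (blast\ intro:\ Fin.intros)
+\end{isabelle}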
+
+The chief problem with making (co)inductive definitions involves type-checking
+the rules.  Sometimes, additional theorems need to be supplied under
+\isa{type_intros} or \isa{type_elims}.  If the package fails when trying
+to prove your introduction rules, then set the flag \ttindexbold{trace_induct}
+to \isa{true} and try again.  (See the manual \emph{A Fixedpoint Approach
+  \ldots} for more discussion of type-checking.)
+
+In the example above, $\isa{Pow}(A)$ is given as the domain of
+$\isa{Fin}(A)$, for obviously every finite subset of~$A$ is a subset
+of~$A$.  However, the inductive definition package can only prove that given a
+few hints.
+Here is the output that results (with the flag set) when the
+\isa{type_intros} and \isa{type_elims} are omitted from the inductive
+definition above:
+\begin{alltt*}\isastyleminor
+Inductive definition Finite.Fin
+Fin(A) ==
+lfp(Pow(A),
+    \%X. {z\isasymin{}Pow(A) . z = 0 | ({\isasymexists}a b. z = cons(a,b) & a\isasymin{}A & b\isasymin{}X)})
+  Proving monotonicity...
+\ttbreak
+  Proving the introduction rules...
+The type-checking subgoal:
+0 \isasymin Fin(A)
+ 1. 0 \isasymin Pow(A)
+\ttbreak
+The subgoal after monos, type_elims:
+0 \isasymin Fin(A)
+ 1. 0 \isasymin Pow(A)
+*** prove_goal: tactic failed
+\end{alltt*}
+We see the need to supply theorems to let the package prove
+$\emptyset\in\isa{Pow}(A)$.  Restoring the \isa{type_intros} but not the
+\isa{type_elims}, we again get an error message:
+\begin{alltt*}\isastyleminor
+The type-checking subgoal:
+0 \isasymin Fin(A)
+ 1. 0 \isasymin Pow(A)
+\ttbreak
+The subgoal after monos, type_elims:
+0 \isasymin Fin(A)
+ 1. 0 \isasymin Pow(A)
+\ttbreak
+The type-checking subgoal:
+cons(a, b) \isasymin Fin(A)
+ 1. [| a \isasymin A; b \isasymin Fin(A) |] ==> cons(a, b) \isasymin Pow(A)
+\ttbreak
+The subgoal after monos, type_elims:
+cons(a, b) \isasymin Fin(A)
+ 1. [| a \isasymin A; b \isasymin Pow(A) |] ==> cons(a, b) \isasymin Pow(A)
+*** prove_goal: tactic failed
+\end{alltt*}
+The first rule has been type-checked, but the second one has failed.  The
+simplest solution to such problems is to prove the failed subgoal separately
+and to supply it under \isa{type_intros}.  The solution actually used is
+to supply, under \isa{type_elims}, a rule that changes
+$b\in\isa{Pow}(A)$ to $b\subseteq A$; together with \isa{cons_subsetI}
+and \isa{PowI}, it is enough to complete the type-checking.
+
+
+
+\subsection{Further examples}
+
+An inductive definition may involve arbitrary monotonic operators.  Here is a
+standard example: the accessible part of a relation.  Note the use
+of~\isa{Pow} in the introduction rule and the corresponding mention of the
+rule \isa{Pow\_mono} in the \isa{monos} list.  If the desired rule has a
+universally quantified premise, usually the effect can be obtained using
+\isa{Pow}.
+\begin{isabelle}
+\isacommand{consts}\ \ acc\ ::\ "i\ =>\ i"\isanewline
+\isacommand{inductive}\isanewline
+\ \ \isakeyword{domains}\ "acc(r)"\ \isasymsubseteq \ "field(r)"\isanewline
+\ \ \isakeyword{intros}\isanewline
+\ \ \ \ vimage:\ \ "[|\ r-``\isacharbraceleft a\isacharbraceright\ \isasymin\ Pow(acc(r));\ a\ \isasymin \ field(r)\ |]
+\isanewline
+\ \ \ \ \ \ \ \ \ \ \ \ \ \ ==>\ a\ \isasymin \ acc(r)"\isanewline
+\ \ \isakeyword{monos}\ \ Pow\_mono
+\end{isabelle}
+
+Finally, here are some coinductive definitions.  We begin by defining
+lazy (potentially infinite) lists as a codatatype:
+\begin{isabelle}
+\isacommand{consts}\ \ llist\ \ ::\ "i=>i"\isanewline
+\isacommand{codatatype}\isanewline
+\ \ "llist(A)"\ =\ LNil\ |\ LCons\ ("a\ \isasymin \ A",\ "l\ \isasymin \ llist(A)")\isanewline
+\end{isabelle}
+
+The notion of equality on such lists is modelled as a bisimulation:
+\begin{isabelle}
+\isacommand{consts}\ \ lleq\ ::\ "i=>i"\isanewline
+\isacommand{coinductive}\isanewline
+\ \ \isakeyword{domains}\ "lleq(A)"\ <=\ "llist(A)\ *\ llist(A)"\isanewline
+\ \ \isakeyword{intros}\isanewline
+\ \ \ \ LNil:\ \ "<LNil,\ LNil>\ \isasymin \ lleq(A)"\isanewline
+\ \ \ \ LCons:\ "[|\ a\ \isasymin \ A;\ <l,l'>\ \isasymin \ lleq(A)\ |]\ \isanewline
+\ \ \ \ \ \ \ \ \ \ \ \ ==>\ <LCons(a,l),\ LCons(a,l')>\ \isasymin \ lleq(A)"\isanewline
+\ \ \isakeyword{type\_intros}\ \ llist.intros
+\end{isabelle}
+This use of \isa{type_intros} is typical: the relation concerns the
+codatatype \isa{llist}, so naturally the introduction rules for that
+codatatype will be required for type-checking the rules.
+
+The Isabelle distribution contains many other inductive definitions.  Simple
+examples are collected in the subdirectory \isa{ZF/Induct}.  The directory
+\isa{Coind} and the theory \isa{ZF/Induct/LList} contain coinductive
+definitions.  Larger examples may be found in other subdirectories of
+\isa{ZF}, such as \isa{IMP} and \isa{Resid}.
+
+
+\subsection{Theorems generated}
+
+Each (co)inductive set defined in a theory file generates a name space
+containing the following elements:
+\begin{ttbox}\isastyleminor
+intros        \textrm{the introduction rules}
+elim          \textrm{the elimination (case analysis) rule}
+induct        \textrm{the standard induction rule}
+mutual_induct \textrm{the mutual induction rule, if needed}
+defs          \textrm{definitions of inductive sets}
+bnd_mono      \textrm{monotonicity property}
+dom_subset    \textrm{inclusion in `bounding set'}
+\end{ttbox}
+Furthermore, each introduction rule is available under its declared
+name. For a codatatype, the component \isa{coinduct} is the coinduction rule,
+replacing the \isa{induct} component.
+
+Recall that the \ttindex{inductive\_cases} declaration generates
+simplified instances of the case analysis rule.  It is as useful for
+inductive definitions as it is for datatypes.  There are many examples
+in the theory
+\isa{Induct/Comb}, which is discussed at length
+elsewhere~\cite{paulson-generic}.  The theory first defines the
+datatype
+\isa{comb} of combinators:
+\begin{alltt*}\isastyleminor
+consts comb :: i
+datatype  "comb" = K
+                 | S
+                 | "#" ("p \isasymin comb", "q \isasymin comb")   (infixl 90)
+\end{alltt*}
+The theory goes on to define contraction and parallel contraction
+inductively.  Then the theory \isa{Induct/Comb.thy} defines special
+cases of contraction, such as this one:
+\begin{isabelle}
+\isacommand{inductive\_cases}\ K\_contractE [elim!]:\ "K -1-> r"
+\end{isabelle}
+The theorem just created is \isa{K -1-> r \ \isasymLongrightarrow \ Q},
+which expresses that the combinator \isa{K} cannot reduce to
+anything.  (From the assumption \isa{K-1->r}, we can conclude any desired
+formula \isa{Q}\@.)  Similar elimination rules for \isa{S} and application are also
+generated. The attribute \isa{elim!}\ shown above supplies the generated
+theorem to the classical reasoner.  This mode of working allows
+effective reasoning about operational semantics.
+
+\index{*coinductive|)} \index{*inductive|)}
+
+
+
+\section{The outer reaches of set theory}
+
+The constructions of the natural numbers and lists use a suite of
+operators for handling recursive function definitions.  I have described
+the developments in detail elsewhere~\cite{paulson-set-II}.  Here is a brief
+summary:
+\begin{itemize}
+  \item Theory \isa{Trancl} defines the transitive closure of a relation
+    (as a least fixedpoint).
+
+  \item Theory \isa{WF} proves the well-founded recursion theorem, using an
+    elegant approach of Tobias Nipkow.  This theorem permits general
+    recursive definitions within set theory.
+
+  \item Theory \isa{Ord} defines the notions of transitive set and ordinal
+    number.  It derives transfinite induction.  A key definition is {\bf
+      less than}: $i<j$ if and only if $i$ and $j$ are both ordinals and
+    $i\in j$.  As a special case, it includes less than on the natural
+    numbers.
+    
+  \item Theory \isa{Epsilon} derives $\varepsilon$-induction and
+    $\varepsilon$-recursion, which are generalisations of transfinite
+    induction and recursion.  It also defines \cdx{rank}$(x)$, which is the
+    least ordinal $\alpha$ such that $x$ is constructed at stage $\alpha$ of
+    the cumulative hierarchy (thus $x\in V@{\alpha+1}$).
+\end{itemize}
+
+Other important theories lead to a theory of cardinal numbers.  They have
+not yet been written up anywhere.  Here is a summary:
+\begin{itemize}
+\item Theory \isa{Rel} defines the basic properties of relations, such as
+  reflexivity, symmetry and transitivity.
+
+\item Theory \isa{EquivClass} develops a theory of equivalence
+  classes, not using the Axiom of Choice.
+
+\item Theory \isa{Order} defines partial orderings, total orderings and
+  wellorderings.
+
+\item Theory \isa{OrderArith} defines orderings on sum and product sets.
+  These can be used to define ordinal arithmetic and have applications to
+  cardinal arithmetic.
+
+\item Theory \isa{OrderType} defines order types.  Every wellordering is
+  equivalent to a unique ordinal, which is its order type.
+
+\item Theory \isa{Cardinal} defines equipollence and cardinal numbers.
+ 
+\item Theory \isa{CardinalArith} defines cardinal addition and
+  multiplication, and proves their elementary laws.  It proves that there
+  is no greatest cardinal.  It also proves a deep result, namely
+  $\kappa\otimes\kappa=\kappa$ for every infinite cardinal~$\kappa$; see
+  Kunen~\cite[page 29]{kunen80}.  None of these results assume the Axiom of
+  Choice, which complicates their proofs considerably.  
+\end{itemize}
+
+The following developments involve the Axiom of Choice (AC):
+\begin{itemize}
+\item Theory \isa{AC} asserts the Axiom of Choice and proves some simple
+  equivalent forms.
+
+\item Theory \isa{Zorn} proves Hausdorff's Maximal Principle, Zorn's Lemma
+  and the Wellordering Theorem, following Abrial and
+  Laffitte~\cite{abrial93}.
+
+\item Theory \isa{Cardinal\_AC} uses AC to prove simplified theorems about
+  the cardinals.  It also proves a theorem needed to justify
+  infinitely branching datatype declarations: if $\kappa$ is an infinite
+  cardinal and $|X(\alpha)| \le \kappa$ for all $\alpha<\kappa$ then
+  $|\union\sb{\alpha<\kappa} X(\alpha)| \le \kappa$.
+
+\item Theory \isa{InfDatatype} proves theorems to justify infinitely
+  branching datatypes.  Arbitrary index sets are allowed, provided their
+  cardinalities have an upper bound.  The theory also justifies some
+  unusual cases of finite branching, involving the finite powerset operator
+  and the finite function space operator.
+\end{itemize}
+
+
+
+\section{The examples directories}
+Directory \isa{ZF/IMP} contains a mechanised version of a semantic
+equivalence proof taken from Winskel~\cite{winskel93}.  It formalises the
+denotational and operational semantics of a simple while-language, then
+proves the two equivalent.  It contains several datatype and inductive
+definitions, and demonstrates their use.
+
+The directory \isa{ZF/ex} contains further developments in ZF set theory.
+Here is an overview; see the files themselves for more details.  I describe
+much of this material in other
+publications~\cite{paulson-set-I,paulson-set-II,paulson-fixedpt-milner}.
+\begin{itemize}
+\item File \isa{misc.ML} contains miscellaneous examples such as
+  Cantor's Theorem, the Schr\"oder-Bernstein Theorem and the `Composition
+  of homomorphisms' challenge~\cite{boyer86}.
+
+\item Theory \isa{Ramsey} proves the finite exponent 2 version of
+  Ramsey's Theorem, following Basin and Kaufmann's
+  presentation~\cite{basin91}.
+
+\item Theory \isa{Integ} develops a theory of the integers as
+  equivalence classes of pairs of natural numbers.
+
+\item Theory \isa{Primrec} develops some computation theory.  It
+  inductively defines the set of primitive recursive functions and presents a
+  proof that Ackermann's function is not primitive recursive.
+
+\item Theory \isa{Primes} defines the Greatest Common Divisor of two
+  natural numbers and the ``divides'' relation.
+
+\item Theory \isa{Bin} defines a datatype for two's complement binary
+  integers, then proves rewrite rules to perform binary arithmetic.  For
+  instance, $1359\times {-}2468 = {-}3354012$ takes 0.3 seconds.
+
+\item Theory \isa{BT} defines the recursive data structure $\isa{bt}(A)$, labelled binary trees.
+
+\item Theory \isa{Term} defines a recursive data structure for terms
+  and term lists.  These are simply finite branching trees.
+
+\item Theory \isa{TF} defines primitives for solving mutually
+  recursive equations over sets.  It constructs sets of trees and forests
+  as an example, including induction and recursion rules that handle the
+  mutual recursion.
+
+\item Theory \isa{Prop} proves soundness and completeness of
+  propositional logic~\cite{paulson-set-II}.  This illustrates datatype
+  definitions, inductive definitions, structural induction and rule
+  induction.
+
+\item Theory \isa{ListN} inductively defines the lists of $n$
+  elements~\cite{paulin-tlca}.
+
+\item Theory \isa{Acc} inductively defines the accessible part of a
+  relation~\cite{paulin-tlca}.
+
+\item Theory \isa{Comb} defines the datatype of combinators and
+  inductively defines contraction and parallel contraction.  It goes on to
+  prove the Church-Rosser Theorem.  This case study follows Camilleri and
+  Melham~\cite{camilleri92}.
+
+\item Theory \isa{LList} defines lazy lists and a coinduction
+  principle for proving equations between them.
+\end{itemize}
+
+
+\section{A proof about powersets}\label{sec:ZF-pow-example}
+To demonstrate high-level reasoning about subsets, let us prove the
+equation ${\isa{Pow}(A)\cap \isa{Pow}(B)}= \isa{Pow}(A\cap B)$.  Compared
+with first-order logic, set theory involves a maze of rules, and theorems
+have many different proofs.  Attempting other proofs of the theorem might
+be instructive.  This proof exploits the lattice properties of
+intersection.  It also uses the monotonicity of the powerset operation,
+from \isa{ZF/mono.ML}:
+\begin{isabelle}
+\tdx{Pow_mono}:     A \isasymsubseteq B ==> Pow(A) \isasymsubseteq Pow(B)
+\end{isabelle}
+We enter the goal and make the first step, which breaks the equation into
+two inclusions by extensionality:\index{*equalityI theorem}
+\begin{isabelle}
+\isacommand{lemma}\ "Pow(A\ Int\ B)\ =\ Pow(A)\ Int\ Pow(B)"\isanewline
+\ 1.\ Pow(A\ \isasyminter \ B)\ =\ Pow(A)\ \isasyminter \ Pow(B)\isanewline
+\isacommand{apply}\ (rule\ equalityI)\isanewline
+\ 1.\ Pow(A\ \isasyminter \ B)\ \isasymsubseteq \ Pow(A)\ \isasyminter \ Pow(B)\isanewline
+\ 2.\ Pow(A)\ \isasyminter \ Pow(B)\ \isasymsubseteq \ Pow(A\ \isasyminter \ B)
+\end{isabelle}
+Both inclusions could be tackled straightforwardly using \isa{subsetI}.
+A shorter proof results from noting that intersection forms the greatest
+lower bound:\index{*Int_greatest theorem}
+\begin{isabelle}
+\isacommand{apply}\ (rule\ Int\_greatest)\isanewline
+\ 1.\ Pow(A\ \isasyminter \ B)\ \isasymsubseteq \ Pow(A)\isanewline
+\ 2.\ Pow(A\ \isasyminter \ B)\ \isasymsubseteq \ Pow(B)\isanewline
+\ 3.\ Pow(A)\ \isasyminter \ Pow(B)\ \isasymsubseteq \ Pow(A\ \isasyminter \ B)
+\end{isabelle}
+Subgoal~1 follows by applying the monotonicity of \isa{Pow} to $A\int
+B\subseteq A$; subgoal~2 follows similarly:
+\index{*Int_lower1 theorem}\index{*Int_lower2 theorem}
+\begin{isabelle}
+\isacommand{apply}\ (rule\ Int\_lower1\ [THEN\ Pow\_mono])\isanewline
+\ 1.\ Pow(A\ \isasyminter \ B)\ \isasymsubseteq \ Pow(B)\isanewline
+\ 2.\ Pow(A)\ \isasyminter \ Pow(B)\ \isasymsubseteq \ Pow(A\ \isasyminter \ B)
+\isanewline
+\isacommand{apply}\ (rule\ Int\_lower2\ [THEN\ Pow\_mono])\isanewline
+\ 1.\ Pow(A)\ \isasyminter \ Pow(B)\ \isasymsubseteq \ Pow(A\ \isasyminter \ B)
+\end{isabelle}
+We are left with the opposite inclusion, which we tackle in the
+straightforward way:\index{*subsetI theorem}
+\begin{isabelle}
+\isacommand{apply}\ (rule\ subsetI)\isanewline
+\ 1.\ \isasymAnd x.\ x\ \isasymin \ Pow(A)\ \isasyminter \ Pow(B)\ \isasymLongrightarrow \ x\ \isasymin \ Pow(A\ \isasyminter \ B)
+\end{isabelle}
+The subgoal is to show $x\in \isa{Pow}(A\cap B)$ assuming $x\in\isa{Pow}(A)\cap \isa{Pow}(B)$; eliminating this assumption produces two
+subgoals.  The rule \tdx{IntE} treats the intersection like a conjunction
+instead of unfolding its definition.
+\begin{isabelle}
+\isacommand{apply}\ (erule\ IntE)\isanewline
+\ 1.\ \isasymAnd x.\ \isasymlbrakk x\ \isasymin \ Pow(A);\ x\ \isasymin \ Pow(B)\isasymrbrakk \ \isasymLongrightarrow \ x\ \isasymin \ Pow(A\ \isasyminter \ B)
+\end{isabelle}
+The next step replaces the \isa{Pow} by the subset
+relation~($\subseteq$).\index{*PowI theorem}
+\begin{isabelle}
+\isacommand{apply}\ (rule\ PowI)\isanewline
+\ 1.\ \isasymAnd x.\ \isasymlbrakk x\ \isasymin \ Pow(A);\ x\ \isasymin \ Pow(B)\isasymrbrakk \ \isasymLongrightarrow \ x\ \isasymsubseteq \ A\ \isasyminter \ B%
+\end{isabelle}
+We perform the same replacement in the assumptions.  This is a good
+demonstration of the tactic \ttindex{drule}:\index{*PowD theorem}
+\begin{isabelle}
+\isacommand{apply}\ (drule\ PowD)+\isanewline
+\ 1.\ \isasymAnd x.\ \isasymlbrakk x\ \isasymsubseteq \ A;\ x\ \isasymsubseteq \ B\isasymrbrakk \ \isasymLongrightarrow \ x\ \isasymsubseteq \ A\ \isasyminter \ B%
+\end{isabelle}
+The assumptions are that $x$ is a lower bound of both $A$ and~$B$, but
+$A\int B$ is the greatest lower bound:\index{*Int_greatest theorem}
+\begin{isabelle}
+\isacommand{apply}\ (rule\ Int\_greatest)\isanewline
+\ 1.\ \isasymAnd x.\ \isasymlbrakk x\ \isasymsubseteq \ A;\ x\ \isasymsubseteq \ B\isasymrbrakk \ \isasymLongrightarrow \ x\ \isasymsubseteq \ A\isanewline
+\ 2.\ \isasymAnd x.\ \isasymlbrakk x\ \isasymsubseteq \ A;\ x\ \isasymsubseteq \ B\isasymrbrakk \ \isasymLongrightarrow \ x\ \isasymsubseteq \ B%
+\end{isabelle}
+To conclude the proof, we clear up the trivial subgoals:
+\begin{isabelle}
+\isacommand{apply}\ (assumption+)\isanewline
+\isacommand{done}%
+\end{isabelle}
+
+We could have performed this proof instantly by calling
+\ttindex{blast}:
+\begin{isabelle}
+\isacommand{lemma}\ "Pow(A\ Int\ B)\ =\ Pow(A)\ Int\ Pow(B)"\isanewline
+\isacommand{by}\ blast
+\end{isabelle}
+Past researchers regarded this as a difficult proof, as indeed it is if all
+the symbols are replaced by their definitions.
+\goodbreak
+
+\section{Monotonicity of the union operator}
+For another example, we prove that general union is monotonic:
+${C\subseteq D}$ implies $\bigcup(C)\subseteq \bigcup(D)$.  To begin, we
+tackle the inclusion using \tdx{subsetI}:
+\begin{isabelle}
+\isacommand{lemma}\ "C\isasymsubseteq D\ ==>\ Union(C)\
+\isasymsubseteq \ Union(D)"\isanewline
+\isacommand{apply}\ (rule\ subsetI)\isanewline
+\ 1.\ \isasymAnd x.\ \isasymlbrakk C\ \isasymsubseteq \ D;\ x\ \isasymin \ \isasymUnion C\isasymrbrakk \ \isasymLongrightarrow \ x\ \isasymin \ \isasymUnion D%
+\end{isabelle}
+Big union is like an existential quantifier --- the occurrence in the
+assumptions must be eliminated early, since it creates parameters.
+\index{*UnionE theorem}
+\begin{isabelle}
+\isacommand{apply}\ (erule\ UnionE)\isanewline
+\ 1.\ \isasymAnd x\ B.\ \isasymlbrakk C\ \isasymsubseteq \ D;\ x\ \isasymin \ B;\ B\ \isasymin \ C\isasymrbrakk \ \isasymLongrightarrow \ x\ \isasymin \ \isasymUnion D%
+\end{isabelle}
+Now we may apply \tdx{UnionI}, which creates an unknown involving the
+parameters.  To show \isa{x\ \isasymin \ \isasymUnion D} it suffices to show that~\isa{x} belongs
+to some element, say~\isa{?B2(x,B)}, of~\isa{D}\@.
+\begin{isabelle}
+\isacommand{apply}\ (rule\ UnionI)\isanewline
+\ 1.\ \isasymAnd x\ B.\ \isasymlbrakk C\ \isasymsubseteq \ D;\ x\ \isasymin \ B;\ B\ \isasymin \ C\isasymrbrakk \ \isasymLongrightarrow \ ?B2(x,\ B)\ \isasymin \ D\isanewline
+\ 2.\ \isasymAnd x\ B.\ \isasymlbrakk C\ \isasymsubseteq \ D;\ x\ \isasymin \ B;\ B\ \isasymin \ C\isasymrbrakk \ \isasymLongrightarrow \ x\ \isasymin \ ?B2(x,\ B)
+\end{isabelle}
+Combining the rule \tdx{subsetD} with the assumption \isa{C\ \isasymsubseteq \ D} yields 
+$\Var{a}\in C \Imp \Var{a}\in D$, which reduces subgoal~1.  Note that
+\isa{erule} removes the subset assumption.
+\begin{isabelle}
+\isacommand{apply}\ (erule\ subsetD)\isanewline
+\ 1.\ \isasymAnd x\ B.\ \isasymlbrakk x\ \isasymin \ B;\ B\ \isasymin \ C\isasymrbrakk \ \isasymLongrightarrow \ ?B2(x,\ B)\ \isasymin \ C\isanewline
+\ 2.\ \isasymAnd x\ B.\ \isasymlbrakk C\ \isasymsubseteq \ D;\ x\ \isasymin \ B;\ B\ \isasymin \ C\isasymrbrakk \ \isasymLongrightarrow \ x\ \isasymin \ ?B2(x,\ B)
+\end{isabelle}
+The rest is routine.  Observe how the first call to \isa{assumption}
+instantiates \isa{?B2(x,B)} to~\isa{B}\@.
+\begin{isabelle}
+\isacommand{apply}\ assumption\ \isanewline
+\ 1.\ \isasymAnd x\ B.\ \isasymlbrakk C\ \isasymsubseteq \ D;\ x\ \isasymin \ B;\ B\ \isasymin \ C\isasymrbrakk \ \isasymLongrightarrow \ x\ \isasymin \ B%
+\isanewline
+\isacommand{apply}\ assumption\ \isanewline
+No\ subgoals!\isanewline
+\isacommand{done}%
+\end{isabelle}
+Again, \isa{blast} can prove this theorem in one step.
+
+The theory \isa{ZF/equalities.thy} has many similar proofs.  Reasoning about
+general intersection can be difficult because of its anomalous behaviour on
+the empty set.  However, \isa{blast} copes well with these.  Here is
+a typical example, borrowed from Devlin~\cite[page 12]{devlin79}:
+\[ a\in C \,\Imp\, \inter@{x\in C} \Bigl(A(x) \int B(x)\Bigr) =        
+       \Bigl(\inter@{x\in C} A(x)\Bigr)  \int  
+       \Bigl(\inter@{x\in C} B(x)\Bigr)  \]
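+
+Stated in Isar (a sketch, using the binder syntax for general intersection),
+it might be dispatched like this:
+\begin{isabelle}
+\isacommand{lemma}\ "a\ \isasymin \ C\ ==>\isanewline
+\ \ \ \ \ \ \ \ (\isasymInter x\isasymin C.\ A(x)\ \isasyminter \ B(x))\ =\ (\isasymInter x\isasymin C.\ A(x))\ \isasyminter \ (\isasymInter x\isasymin C.\ B(x))"\isanewline
+\ \ \isacommand{by}\ blast
+\end{isabelle}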
+
+\section{Low-level reasoning about functions}
+The derived rules \isa{lamI}, \isa{lamE}, \isa{lam_type}, \isa{beta}
+and \isa{eta} support reasoning about functions in a
+$\lambda$-calculus style.  This is generally easier than regarding
+functions as sets of ordered pairs.  But sometimes we must look at the
+underlying representation, as in the following proof
+of~\tdx{fun_disjoint_apply1}.  This states that if $f$ and~$g$ are
+functions with disjoint domains~$A$ and~$C$, and if $a\in A$, then
+$(f\un g)`a = f`a$:
+\begin{isabelle}
+\isacommand{lemma}\ "[|\ a\ \isasymin \ A;\ \ f\ \isasymin \ A->B;\ \ g\ \isasymin \ C->D;\ \ A\ \isasyminter \ C\ =\ 0\ |]
+\isanewline
+\ \ \ \ \ \ \ \ ==>\ (f\ \isasymunion \ g)`a\ =\ f`a"
+\end{isabelle}
+Using \tdx{apply_equality}, we reduce the equality to reasoning about
+ordered pairs.  The second subgoal is to verify that \isa{f\ \isasymunion \ g} is a function, since
+\isa{Pi(?A,?B)} denotes a dependent function space.
+\begin{isabelle}
+\isacommand{apply}\ (rule\ apply\_equality)\isanewline
+\ 1.\ \isasymlbrakk a\ \isasymin \ A;\ f\ \isasymin \ A\ \isasymrightarrow \ B;\ g\ \isasymin \ C\ \isasymrightarrow \ D;\ A\ \isasyminter \ C\ =\ 0\isasymrbrakk \isanewline
+\isaindent{\ 1.\ }\isasymLongrightarrow \ \isasymlangle a,\ f\ `\ a\isasymrangle \ \isasymin \ f\ \isasymunion \ g\isanewline
+\ 2.\ \isasymlbrakk a\ \isasymin \ A;\ f\ \isasymin \ A\ \isasymrightarrow \ B;\ g\ \isasymin \ C\ \isasymrightarrow \ D;\ A\ \isasyminter \ C\ =\ 0\isasymrbrakk \isanewline
+\isaindent{\ 2.\ }\isasymLongrightarrow \ f\ \isasymunion \ g\ \isasymin \ Pi(?A,\ ?B)
+\end{isabelle}
+We must show that the pair belongs to~$f$ or~$g$; by~\tdx{UnI1} we
+choose~$f$:
+\begin{isabelle}
+\isacommand{apply}\ (rule\ UnI1)\isanewline
+\ 1.\ \isasymlbrakk a\ \isasymin \ A;\ f\ \isasymin \ A\ \isasymrightarrow \ B;\ g\ \isasymin \ C\ \isasymrightarrow \ D;\ A\ \isasyminter \ C\ =\ 0\isasymrbrakk \ \isasymLongrightarrow \ \isasymlangle a,\ f\ `\ a\isasymrangle \ \isasymin \ f\isanewline
+\ 2.\ \isasymlbrakk a\ \isasymin \ A;\ f\ \isasymin \ A\ \isasymrightarrow \ B;\ g\ \isasymin \ C\ \isasymrightarrow \ D;\ A\ \isasyminter \ C\ =\ 0\isasymrbrakk \isanewline
+\isaindent{\ 2.\ }\isasymLongrightarrow \ f\ \isasymunion \ g\ \isasymin \ Pi(?A,\ ?B)
+\end{isabelle}
+To show $\pair{a,f`a}\in f$ we use \tdx{apply_Pair}, which is
+essentially the converse of \tdx{apply_equality}:
+\begin{isabelle}
+\isacommand{apply}\ (rule\ apply\_Pair)\isanewline
+\ 1.\ \isasymlbrakk a\ \isasymin \ A;\ f\ \isasymin \ A\ \isasymrightarrow \ B;\ g\ \isasymin \ C\ \isasymrightarrow \ D;\ A\ \isasyminter \ C\ =\ 0\isasymrbrakk \ \isasymLongrightarrow \ f\ \isasymin \ Pi(?A2,?B2)\isanewline
+\ 2.\ \isasymlbrakk a\ \isasymin \ A;\ f\ \isasymin \ A\ \isasymrightarrow \ B;\ g\ \isasymin \ C\ \isasymrightarrow \ D;\ A\ \isasyminter \ C\ =\ 0\isasymrbrakk \ \isasymLongrightarrow \ a\ \isasymin \ ?A2\isanewline
+\ 3.\ \isasymlbrakk a\ \isasymin \ A;\ f\ \isasymin \ A\ \isasymrightarrow \ B;\ g\ \isasymin \ C\ \isasymrightarrow \ D;\ A\ \isasyminter \ C\ =\ 0\isasymrbrakk \isanewline
+\isaindent{\ 3.\ }\isasymLongrightarrow \ f\ \isasymunion \ g\ \isasymin \ Pi(?A,\ ?B)
+\end{isabelle}
+Using the assumptions $f\in A\to B$ and $a\in A$, we solve the two subgoals
+from \tdx{apply_Pair}.  Recall that a $\Pi$-set is merely a generalized
+function space, and observe that~{\tt?A2} gets instantiated to~\isa{A}.
+\begin{isabelle}
+\isacommand{apply}\ assumption\ \isanewline
+\ 1.\ \isasymlbrakk a\ \isasymin \ A;\ f\ \isasymin \ A\ \isasymrightarrow \ B;\ g\ \isasymin \ C\ \isasymrightarrow \ D;\ A\ \isasyminter \ C\ =\ 0\isasymrbrakk \ \isasymLongrightarrow \ a\ \isasymin \ A\isanewline
+\ 2.\ \isasymlbrakk a\ \isasymin \ A;\ f\ \isasymin \ A\ \isasymrightarrow \ B;\ g\ \isasymin \ C\ \isasymrightarrow \ D;\ A\ \isasyminter \ C\ =\ 0\isasymrbrakk \isanewline
+\isaindent{\ 2.\ }\isasymLongrightarrow \ f\ \isasymunion \ g\ \isasymin \ Pi(?A,\ ?B)
+\isanewline
+\isacommand{apply}\ assumption\ \isanewline
+\ 1.\ \isasymlbrakk a\ \isasymin \ A;\ f\ \isasymin \ A\ \isasymrightarrow \ B;\ g\ \isasymin \ C\ \isasymrightarrow \ D;\ A\ \isasyminter \ C\ =\ 0\isasymrbrakk \isanewline
+\isaindent{\ 1.\ }\isasymLongrightarrow \ f\ \isasymunion \ g\ \isasymin \ Pi(?A,\ ?B)
+\end{isabelle}
+To construct functions of the form $f\un g$, we apply
+\tdx{fun_disjoint_Un}:
+\begin{isabelle}
+\isacommand{apply}\ (rule\ fun\_disjoint\_Un)\isanewline
+\ 1.\ \isasymlbrakk a\ \isasymin \ A;\ f\ \isasymin \ A\ \isasymrightarrow \ B;\ g\ \isasymin \ C\ \isasymrightarrow \ D;\ A\ \isasyminter \ C\ =\ 0\isasymrbrakk \ \isasymLongrightarrow \ f\ \isasymin \ ?A3\ \isasymrightarrow \ ?B3\isanewline
+\ 2.\ \isasymlbrakk a\ \isasymin \ A;\ f\ \isasymin \ A\ \isasymrightarrow \ B;\ g\ \isasymin \ C\ \isasymrightarrow \ D;\ A\ \isasyminter \ C\ =\ 0\isasymrbrakk \ \isasymLongrightarrow \ g\ \isasymin \ ?C3\ \isasymrightarrow \ ?D3\isanewline
+\ 3.\ \isasymlbrakk a\ \isasymin \ A;\ f\ \isasymin \ A\ \isasymrightarrow \ B;\ g\ \isasymin \ C\ \isasymrightarrow \ D;\ A\ \isasyminter \ C\ =\ 0\isasymrbrakk \ \isasymLongrightarrow \ ?A3\ \isasyminter \ ?C3\ =\ 0
+\end{isabelle}
+The remaining subgoals are instances of the assumptions.  Again, observe how
+unknowns become instantiated:
+\begin{isabelle}
+\isacommand{apply}\ assumption\ \isanewline
+\ 1.\ \isasymlbrakk a\ \isasymin \ A;\ f\ \isasymin \ A\ \isasymrightarrow \ B;\ g\ \isasymin \ C\ \isasymrightarrow \ D;\ A\ \isasyminter \ C\ =\ 0\isasymrbrakk \ \isasymLongrightarrow \ g\ \isasymin \ ?C3\ \isasymrightarrow \ ?D3\isanewline
+\ 2.\ \isasymlbrakk a\ \isasymin \ A;\ f\ \isasymin \ A\ \isasymrightarrow \ B;\ g\ \isasymin \ C\ \isasymrightarrow \ D;\ A\ \isasyminter \ C\ =\ 0\isasymrbrakk \ \isasymLongrightarrow \ A\ \isasyminter \ ?C3\ =\ 0
+\isanewline
+\isacommand{apply}\ assumption\ \isanewline
+\ 1.\ \isasymlbrakk a\ \isasymin \ A;\ f\ \isasymin \ A\ \isasymrightarrow \ B;\ g\ \isasymin \ C\ \isasymrightarrow \ D;\ A\ \isasyminter \ C\ =\ 0\isasymrbrakk \ \isasymLongrightarrow \ A\ \isasyminter \ C\ =\ 0
+\isanewline
+\isacommand{apply}\ assumption\ \isanewline
+No\ subgoals!\isanewline
+\isacommand{done}
+\end{isabelle}
+See the theories \isa{ZF/func.thy} and \isa{ZF/WF.thy} for more
+examples of reasoning about functions.
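+
+When the underlying representation can be ignored, the $\lambda$-calculus
+rules mentioned at the start of this section usually give one-line proofs.
+As a small sketch in ASCII syntax (\isa{beta} is a conditional rewrite rule,
+so the simplifier must discharge the premise \isa{a\ :\ A}), a trivial
+$\beta$-reduction goes through as follows:
+\begin{ttbox}
+lemma "a : A ==> (lam x:A. x) ` a = a"
+by (simp add: beta)
+\end{ttbox}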
+
+\index{set theory|)}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/doc-src/ZF/document/build	Mon Aug 27 21:46:00 2012 +0200
@@ -0,0 +1,24 @@
+#!/bin/bash
+
+set -e
+
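+# arguments supplied by the Isabelle document preparation system:
+#   $1 = output format (e.g. pdf), $2 = document variant name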
+FORMAT="$1"
+VARIANT="$2"
+
+"$ISABELLE_TOOL" logo -o isabelle_zf.pdf "ZF"
+"$ISABELLE_TOOL" logo -o isabelle_zf.eps "ZF"
+
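+# copy shared LaTeX styles, the bibliography and the syntax chapter from doc-src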
+cp "$ISABELLE_HOME/doc-src/ttbox.sty" .
+cp "$ISABELLE_HOME/doc-src/proof.sty" .
+cp "$ISABELLE_HOME/doc-src/manual.bib" .
+cp "$ISABELLE_HOME/doc-src/Logics/document/syntax.tex" .
+
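+# repeated LaTeX runs, interleaved with bibtex ("-o bbl") and index
+# processing, so that citations, the index and cross-references resolve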
+"$ISABELLE_TOOL" latex -o sty
+"$ISABELLE_TOOL" latex -o "$FORMAT"
+"$ISABELLE_TOOL" latex -o bbl
+"$ISABELLE_TOOL" latex -o "$FORMAT"
+"$ISABELLE_TOOL" latex -o "$FORMAT"
+"$ISABELLE_HOME/doc-src/sedindex" root
+[ -f root.out ] && "$ISABELLE_HOME/doc-src/fixbookmarks" root.out
+"$ISABELLE_TOOL" latex -o "$FORMAT"
+"$ISABELLE_TOOL" latex -o "$FORMAT"
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/doc-src/ZF/document/logics.sty	Mon Aug 27 21:46:00 2012 +0200
@@ -0,0 +1,181 @@
+% logics.sty : Logics Manuals Page Layout
+%
+\typeout{Document Style logics. Released 18 August 2003}
+
+\hyphenation{Isa-belle man-u-script man-u-scripts ap-pen-dix mut-u-al-ly}
+\hyphenation{data-type data-types co-data-type co-data-types }
+
+%usage: \iflabelundefined{LABEL}{if not defined}{if defined}
+\newcommand{\iflabelundefined}[1]{\@ifundefined{r@#1}}
+
+
+%%%INDEXING  use isa-index to process the index
+
+\newcommand\seealso[2]{\emph{see also} #1}
+\usepackage{makeidx}
+
+%index, putting page numbers of definitions in boldface
+\def\bold#1{\textbf{#1}}
+\newcommand\fnote[1]{#1n}
+\newcommand\indexbold[1]{\index{#1|bold}}
+
+% The alternative to \protect\isa in the indexing macros is
+% \noexpand\noexpand \noexpand\isa
+% need TWO levels of \noexpand to delay the expansion of \isa:
+%  the \noexpand\noexpand will leave one \noexpand, to be given to the
+%  (still unexpanded) \isa token.  See TeX by Topic, page 122.
+
+%%%% for indexing constants, symbols, theorems, ...
+\newcommand\cdx[1]{\isa{#1}\index{#1@\protect\isa{#1} (constant)}}
+\newcommand\sdx[1]{\isa{#1}\index{#1@\protect\isa{#1} (symbol)}}
+
+\newcommand\tdx[1]{\isa{#1}\index{#1@\protect\isa{#1} (theorem)}}
+\newcommand\tdxbold[1]{\isa{#1}\index{#1@\protect\isa{#1} (theorem)|bold}}
+
+\newcommand\cldx[1]{\isa{#1}\index{#1@\protect\isa{#1} (class)}}
+\newcommand\tydx[1]{\isa{#1}\index{#1@\protect\isa{#1} (type)}}
+\newcommand\thydx[1]{\isa{#1}\index{#1@\protect\isa{#1} (theory)}}
+
+\newcommand\attrdx[1]{\isa{#1}\index{#1@\protect\isa{#1} (attribute)}}
+\newcommand\cmmdx[1]{\index{#1@\protect\isacommand{#1} (command)}}
+\newcommand\commdx[1]{\isacommand{#1}\index{#1@\protect\isacommand{#1} (command)}}
+\newcommand\methdx[1]{\isa{#1}\index{#1@\protect\isa{#1} (method)}}
+\newcommand\tooldx[1]{\isa{#1}\index{#1@\protect\isa{#1} (tool)}}
+\newcommand\settdx[1]{\isa{#1}\index{#1@\protect\isa{#1} (setting)}}
+
+%set argument in \bf font and index in ROMAN font (for definitions in text!)
+\newcommand\bfindex[1]{{\bf#1}\index{#1|bold}\@}
+
+\newcommand\rmindex[1]{{#1}\index{#1}\@}
+\newcommand\ttindex[1]{\texttt{#1}\index{#1@\texttt{#1}}\@}
+\newcommand\ttindexbold[1]{\texttt{#1}\index{#1@\texttt{#1}|bold}\@}
+
+\newcommand{\indexboldpos}[2]{#1\@}
+\newcommand{\ttindexboldpos}[2]{\isa{#1}\@}
+
+%\newtheorem{theorem}{Theorem}[section]
+\newtheorem{Exercise}{Exercise}[section]
+\newenvironment{exercise}{\begin{Exercise}\rm}{\end{Exercise}}
+\newcommand{\ttlbr}{\texttt{[|}}
+\newcommand{\ttrbr}{\texttt{|]}}
+\newcommand{\ttor}{\texttt{|}}
+\newcommand{\ttall}{\texttt{!}}
+\newcommand{\ttuniquex}{\texttt{?!}}
+\newcommand{\ttEXU}{\texttt{EX!}}
+\newcommand{\ttAnd}{\texttt{!!}}
+
+\newcommand{\isasymignore}{}
+\newcommand{\isasymimp}{\isasymlongrightarrow}
+\newcommand{\isasymImp}{\isasymLongrightarrow}
+\newcommand{\isasymFun}{\isasymRightarrow}
+\newcommand{\isasymuniqex}{\isamath{\exists!\,}}
+\renewcommand{\S}{Sect.\ts}
+
+\renewenvironment{isamarkuptxt}{\begin{isamarkuptext}}{\end{isamarkuptext}}
+
+\newif\ifremarks
+\newcommand{\REMARK}[1]{\ifremarks\marginpar{\raggedright\footnotesize#1}\fi}
+
+%names of Isabelle rules
+\newcommand{\rulename}[1]{\hfill(#1)}
+\newcommand{\rulenamedx}[1]{\hfill(#1\index{#1@\protect\isa{#1} (theorem)|bold})}
+
+%%%% meta-logical connectives
+
+\let\Forall=\bigwedge
+\let\Imp=\Longrightarrow
+\let\To=\Rightarrow
+\newcommand{\Var}[1]{{?\!#1}}
+
+%%% underscores as ordinary characters, not for subscripting
+%%  use @ or \sb for subscripting; use \at for @
+%%  only works in \tt font
+%%  must not make _ an active char; would make \ttindex fail!
+\gdef\underscoreoff{\catcode`\@=8\catcode`\_=\other}
+\gdef\underscoreon{\catcode`\_=8\makeatother}
+\chardef\other=12
+\chardef\at=`\@
+
+% alternative underscore
+\def\_{\leavevmode\kern.06em\vbox{\hrule height.2ex width.3em}\hskip0.1em}
+
+
+%%%% ``WARNING'' environment
+\def\dbend{\vtop to 0pt{\vss\hbox{\Huge\bf!}\vss}}
+\newenvironment{warn}{\medskip\medbreak\begingroup \clubpenalty=10000 
+         \small %%WAS\baselineskip=0.9\baselineskip
+         \noindent \hangindent\parindent \hangafter=-2 
+         \hbox to0pt{\hskip-\hangindent\dbend\hfill}\ignorespaces}%
+        {\par\endgroup\medbreak}
+
+
+%%%% Standard logical symbols
+\let\turn=\vdash
+\let\conj=\wedge
+\let\disj=\vee
+\let\imp=\rightarrow
+\let\bimp=\leftrightarrow
+\newcommand\all[1]{\forall#1.}  %quantification
+\newcommand\ex[1]{\exists#1.}
+\newcommand{\pair}[1]{\langle#1\rangle}
+
+\newcommand{\lparr}{\mathopen{(\!|}}
+\newcommand{\rparr}{\mathclose{|\!)}}
+\newcommand{\fs}{\mathpunct{,\,}}
+\newcommand{\ty}{\mathrel{::}}
+\newcommand{\asn}{\mathrel{:=}}
+\newcommand{\more}{\ldots}
+\newcommand{\record}[1]{\lparr #1 \rparr}
+\newcommand{\dtt}{\mathord.}
+
+\newcommand\lbrakk{\mathopen{[\![}}
+\newcommand\rbrakk{\mathclose{]\!]}}
+\newcommand\List[1]{\lbrakk#1\rbrakk}  %was \obj
+\newcommand\vpile[1]{\begin{array}{c}#1\end{array}}
+\newenvironment{matharray}[1]{\[\begin{array}{#1}}{\end{array}\]}
+\newcommand{\Text}[1]{\mbox{#1}}
+
+\DeclareMathSymbol{\dshsym}{\mathalpha}{letters}{"2D}
+\newcommand{\dsh}{\mathit{\dshsym}}
+
+\let\int=\cap
+\let\un=\cup
+\let\inter=\bigcap
+\let\union=\bigcup
+
+\def\ML{{\sc ml}}
+\def\AST{{\sc ast}}
+
+%macros to change the treatment of symbols
+\def\relsemicolon{\mathcode`\;="303B}   %treat ; like a relation
+\def\binperiod{\mathcode`\.="213A}   %treat . like a binary operator
+\def\binvert{\mathcode`\|="226A}     %treat | like a binary operator
+
+%redefinition of \sloppy and \fussy to use \emergencystretch
+\def\sloppy{\tolerance2000 \hfuzz.5pt \vfuzz.5pt \emergencystretch=15pt}
+\def\fussy{\tolerance200 \hfuzz.1pt \vfuzz.1pt \emergencystretch=0pt}
+
+%non-bf version of description
+\def\descrlabel#1{\hspace\labelsep #1}
+\def\descr{\list{}{\labelwidth\z@ \itemindent-\leftmargin\let\makelabel\descrlabel}}
+\let\enddescr\endlist
+
+% The mathcodes for the letters A, ..., Z, a, ..., z are changed to
+% generate text italic rather than math italic by default. This makes
+% multi-letter identifiers look better. The mathcode for character c
+% is set to |"7000| (variable family) + |"400| (text italic) + |c|.
+%
+\DeclareSymbolFont{italics}{\encodingdefault}{\rmdefault}{m}{it}%
+\def\@setmcodes#1#2#3{{\count0=#1 \count1=#3
+        \loop \global\mathcode\count0=\count1 \ifnum \count0<#2
+        \advance\count0 by1 \advance\count1 by1 \repeat}}
+\@setmcodes{`A}{`Z}{"7\hexnumber@\symitalics41}
+\@setmcodes{`a}{`z}{"7\hexnumber@\symitalics61}
+
+%%% \dquotes permits usage of "..." for \hbox{...} 
+%%%   also taken from under.sty
+{\catcode`\"=\active
+\gdef\dquotes{\catcode`\"=\active  \let"=\@mathText}%
+\gdef\@mathText#1"{\hbox{\mathTextFont #1\/}}}
+\def\mathTextFont{\frenchspacing\tt}
+\def\dquotesoff{\catcode`\"=\other}
\ No newline at end of file
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/doc-src/ZF/document/root.tex	Mon Aug 27 21:46:00 2012 +0200
@@ -0,0 +1,77 @@
+\documentclass[11pt,a4paper]{report}
+\usepackage{isabelle,isabellesym}
+\usepackage{graphicx,logics,ttbox,proof,latexsym}
+
+\usepackage{pdfsetup}   
+%last package!
+
+\remarkstrue 
+
+%%% to index derived rls: ^\([a-zA-Z0-9][a-zA-Z0-9_]*\)        \\tdx{\1}  
+%%% to index rulenames:   ^ *(\([a-zA-Z0-9][a-zA-Z0-9_]*\),     \\tdx{\1}  
+%%% to index constants:   \\tt \([a-zA-Z0-9][a-zA-Z0-9_]*\)     \\cdx{\1}  
+%%% to deverbify:         \\verb|\([^|]*\)|     \\ttindex{\1}  
+
+\title{\includegraphics[scale=0.5]{isabelle_zf} \\[4ex] 
+       Isabelle's Logics: FOL and ZF}
+
+\author{{\em Lawrence C. Paulson}\\
+        Computer Laboratory \\ University of Cambridge \\
+        \texttt{lcp@cl.cam.ac.uk}\\[3ex] 
+        With Contributions by Tobias Nipkow and Markus Wenzel}
+
+\newcommand\subcaption[1]{\par {\centering\normalsize\sc#1\par}\bigskip
+  \hrule\bigskip}
+\newenvironment{constants}{\begin{center}\small\begin{tabular}{rrrr}}{\end{tabular}\end{center}}
+
+\let\ts=\thinspace
+
+\makeindex
+
+\underscoreoff
+
+\setcounter{secnumdepth}{2} \setcounter{tocdepth}{2}  %% {secnumdepth}{2}???
+
+\pagestyle{headings}
+\sloppy
+\binperiod     %%%treat . like a binary operator
+
+\begin{document}
+\maketitle 
+
+\begin{abstract}
+This manual describes Isabelle's formalizations of many-sorted first-order
+logic (\texttt{FOL}) and Zermelo-Fraenkel set theory (\texttt{ZF}).  See the
+\emph{Reference Manual} for general Isabelle commands, and \emph{Introduction
+  to Isabelle} for an overall tutorial.
+
+This manual is part of the earlier Isabelle documentation, 
+which is somewhat superseded by the Isabelle/HOL
+\emph{Tutorial}~\cite{isa-tutorial}. However, the present document is the
+only available documentation for Isabelle's versions of first-order logic
+and set theory. Much of it is concerned with 
+the primitives for conducting proofs 
+using the ML top level.  It has been rewritten to use the Isar proof
+language, but evidence of the old \ML{} orientation remains.
+\end{abstract}
+
+
+\subsubsection*{Acknowledgements} 
+Markus Wenzel made numerous improvements.
+    Philippe de Groote contributed to~ZF.  Philippe No\"el and
+    Martin Coen made many contributions to~ZF.  The research has 
+    been funded by the EPSRC (grants GR/G53279, GR/H40570, GR/K57381,
+    GR/K77051, GR/M75440) and by ESPRIT (projects 3245:
+    Logical Frameworks, and 6453: Types) and by the DFG Schwerpunktprogramm
+    \emph{Deduktion}.
+    
+\pagenumbering{roman} \tableofcontents \cleardoublepage
+\pagenumbering{arabic} 
+\setcounter{page}{1} 
+\input{syntax}
+\include{FOL}
+\include{ZF}
+\bibliographystyle{plain}
+\bibliography{manual}
+\printindex
+\end{document}
--- a/doc-src/ZF/logics-ZF.tex	Mon Aug 27 21:37:34 2012 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,77 +0,0 @@
-\documentclass[11pt,a4paper]{report}
-\usepackage{../../lib/texinputs/isabelle,../../lib/texinputs/isabellesym}
-\usepackage{graphicx,logics,../ttbox,../proof,latexsym}
-
-\usepackage{../pdfsetup}   
-%last package!
-
-\remarkstrue 
-
-%%% to index derived rls: ^\([a-zA-Z0-9][a-zA-Z0-9_]*\)        \\tdx{\1}  
-%%% to index rulenames:   ^ *(\([a-zA-Z0-9][a-zA-Z0-9_]*\),     \\tdx{\1}  
-%%% to index constants:   \\tt \([a-zA-Z0-9][a-zA-Z0-9_]*\)     \\cdx{\1}  
-%%% to deverbify:         \\verb|\([^|]*\)|     \\ttindex{\1}  
-
-\title{\includegraphics[scale=0.5]{isabelle_zf} \\[4ex] 
-       Isabelle's Logics: FOL and ZF}
-
-\author{{\em Lawrence C. Paulson}\\
-        Computer Laboratory \\ University of Cambridge \\
-        \texttt{lcp@cl.cam.ac.uk}\\[3ex] 
-        With Contributions by Tobias Nipkow and Markus Wenzel}
-
-\newcommand\subcaption[1]{\par {\centering\normalsize\sc#1\par}\bigskip
-  \hrule\bigskip}
-\newenvironment{constants}{\begin{center}\small\begin{tabular}{rrrr}}{\end{tabular}\end{center}}
-
-\let\ts=\thinspace
-
-\makeindex
-
-\underscoreoff
-
-\setcounter{secnumdepth}{2} \setcounter{tocdepth}{2}  %% {secnumdepth}{2}???
-
-\pagestyle{headings}
-\sloppy
-\binperiod     %%%treat . like a binary operator
-
-\begin{document}
-\maketitle 
-
-\begin{abstract}
-This manual describes Isabelle's formalizations of many-sorted first-order
-logic (\texttt{FOL}) and Zermelo-Fraenkel set theory (\texttt{ZF}).  See the
-\emph{Reference Manual} for general Isabelle commands, and \emph{Introduction
-  to Isabelle} for an overall tutorial.
-
-This manual is part of the earlier Isabelle documentation, 
-which is somewhat superseded by the Isabelle/HOL
-\emph{Tutorial}~\cite{isa-tutorial}. However, the present document is the
-only available documentation for Isabelle's versions of first-order logic
-and set theory. Much of it is concerned with 
-the primitives for conducting proofs 
-using the ML top level.  It has been rewritten to use the Isar proof
-language, but evidence of the old \ML{} orientation remains.
-\end{abstract}
-
-
-\subsubsection*{Acknowledgements} 
-Markus Wenzel made numerous improvements.
-    Philippe de Groote contributed to~ZF.  Philippe No\"el and
-    Martin Coen made many contributions to~ZF.  The research has 
-    been funded by the EPSRC (grants GR/G53279, GR/H40570, GR/K57381,
-    GR/K77051, GR/M75440) and by ESPRIT (projects 3245:
-    Logical Frameworks, and 6453: Types) and by the DFG Schwerpunktprogramm
-    \emph{Deduktion}.
-    
-\pagenumbering{roman} \tableofcontents \cleardoublepage
-\pagenumbering{arabic} 
-\setcounter{page}{1} 
-\input{../Logics/syntax}
-\include{FOL}
-\include{ZF}
-\bibliographystyle{plain}
-\bibliography{../manual}
-\printindex
-\end{document}
--- a/doc-src/ZF/logics.sty	Mon Aug 27 21:37:34 2012 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,181 +0,0 @@
-% logics.sty : Logics Manuals Page Layout
-%
-\typeout{Document Style logics. Released 18 August 2003}
-
-\hyphenation{Isa-belle man-u-script man-u-scripts ap-pen-dix mut-u-al-ly}
-\hyphenation{data-type data-types co-data-type co-data-types }
-
-%usage: \iflabelundefined{LABEL}{if not defined}{if defined}
-\newcommand{\iflabelundefined}[1]{\@ifundefined{r@#1}}
-
-
-%%%INDEXING  use isa-index to process the index
-
-\newcommand\seealso[2]{\emph{see also} #1}
-\usepackage{makeidx}
-
-%index, putting page numbers of definitions in boldface
-\def\bold#1{\textbf{#1}}
-\newcommand\fnote[1]{#1n}
-\newcommand\indexbold[1]{\index{#1|bold}}
-
-% The alternative to \protect\isa in the indexing macros is
-% \noexpand\noexpand \noexpand\isa
-% need TWO levels of \noexpand to delay the expansion of \isa:
-%  the \noexpand\noexpand will leave one \noexpand, to be given to the
-%  (still unexpanded) \isa token.  See TeX by Topic, page 122.
-
-%%%% for indexing constants, symbols, theorems, ...
-\newcommand\cdx[1]{\isa{#1}\index{#1@\protect\isa{#1} (constant)}}
-\newcommand\sdx[1]{\isa{#1}\index{#1@\protect\isa{#1} (symbol)}}
-
-\newcommand\tdx[1]{\isa{#1}\index{#1@\protect\isa{#1} (theorem)}}
-\newcommand\tdxbold[1]{\isa{#1}\index{#1@\protect\isa{#1} (theorem)|bold}}
-
-\newcommand\cldx[1]{\isa{#1}\index{#1@\protect\isa{#1} (class)}}
-\newcommand\tydx[1]{\isa{#1}\index{#1@\protect\isa{#1} (type)}}
-\newcommand\thydx[1]{\isa{#1}\index{#1@\protect\isa{#1} (theory)}}
-
-\newcommand\attrdx[1]{\isa{#1}\index{#1@\protect\isa{#1} (attribute)}}
-\newcommand\cmmdx[1]{\index{#1@\protect\isacommand{#1} (command)}}
-\newcommand\commdx[1]{\isacommand{#1}\index{#1@\protect\isacommand{#1} (command)}}
-\newcommand\methdx[1]{\isa{#1}\index{#1@\protect\isa{#1} (method)}}
-\newcommand\tooldx[1]{\isa{#1}\index{#1@\protect\isa{#1} (tool)}}
-\newcommand\settdx[1]{\isa{#1}\index{#1@\protect\isa{#1} (setting)}}
-
-%set argument in \bf font and index in ROMAN font (for definitions in text!)
-\newcommand\bfindex[1]{{\bf#1}\index{#1|bold}\@}
-
-\newcommand\rmindex[1]{{#1}\index{#1}\@}
-\newcommand\ttindex[1]{\texttt{#1}\index{#1@\texttt{#1}}\@}
-\newcommand\ttindexbold[1]{\texttt{#1}\index{#1@\texttt{#1}|bold}\@}
-
-\newcommand{\indexboldpos}[2]{#1\@}
-\newcommand{\ttindexboldpos}[2]{\isa{#1}\@}
-
-%\newtheorem{theorem}{Theorem}[section]
-\newtheorem{Exercise}{Exercise}[section]
-\newenvironment{exercise}{\begin{Exercise}\rm}{\end{Exercise}}
-\newcommand{\ttlbr}{\texttt{[|}}
-\newcommand{\ttrbr}{\texttt{|]}}
-\newcommand{\ttor}{\texttt{|}}
-\newcommand{\ttall}{\texttt{!}}
-\newcommand{\ttuniquex}{\texttt{?!}}
-\newcommand{\ttEXU}{\texttt{EX!}}
-\newcommand{\ttAnd}{\texttt{!!}}
-
-\newcommand{\isasymignore}{}
-\newcommand{\isasymimp}{\isasymlongrightarrow}
-\newcommand{\isasymImp}{\isasymLongrightarrow}
-\newcommand{\isasymFun}{\isasymRightarrow}
-\newcommand{\isasymuniqex}{\isamath{\exists!\,}}
-\renewcommand{\S}{Sect.\ts}
-
-\renewenvironment{isamarkuptxt}{\begin{isamarkuptext}}{\end{isamarkuptext}}
-
-\newif\ifremarks
-\newcommand{\REMARK}[1]{\ifremarks\marginpar{\raggedright\footnotesize#1}\fi}
-
-%names of Isabelle rules
-\newcommand{\rulename}[1]{\hfill(#1)}
-\newcommand{\rulenamedx}[1]{\hfill(#1\index{#1@\protect\isa{#1} (theorem)|bold})}
-
-%%%% meta-logical connectives
-
-\let\Forall=\bigwedge
-\let\Imp=\Longrightarrow
-\let\To=\Rightarrow
-\newcommand{\Var}[1]{{?\!#1}}
-
-%%% underscores as ordinary characters, not for subscripting
-%%  use @ or \sb for subscripting; use \at for @
-%%  only works in \tt font
-%%  must not make _ an active char; would make \ttindex fail!
-\gdef\underscoreoff{\catcode`\@=8\catcode`\_=\other}
-\gdef\underscoreon{\catcode`\_=8\makeatother}
-\chardef\other=12
-\chardef\at=`\@
-
-% alternative underscore
-\def\_{\leavevmode\kern.06em\vbox{\hrule height.2ex width.3em}\hskip0.1em}
-
-
-%%%% ``WARNING'' environment
-\def\dbend{\vtop to 0pt{\vss\hbox{\Huge\bf!}\vss}}
-\newenvironment{warn}{\medskip\medbreak\begingroup \clubpenalty=10000 
-         \small %%WAS\baselineskip=0.9\baselineskip
-         \noindent \hangindent\parindent \hangafter=-2 
-         \hbox to0pt{\hskip-\hangindent\dbend\hfill}\ignorespaces}%
-        {\par\endgroup\medbreak}
-
-
-%%%% Standard logical symbols
-\let\turn=\vdash
-\let\conj=\wedge
-\let\disj=\vee
-\let\imp=\rightarrow
-\let\bimp=\leftrightarrow
-\newcommand\all[1]{\forall#1.}  %quantification
-\newcommand\ex[1]{\exists#1.}
-\newcommand{\pair}[1]{\langle#1\rangle}
-
-\newcommand{\lparr}{\mathopen{(\!|}}
-\newcommand{\rparr}{\mathclose{|\!)}}
-\newcommand{\fs}{\mathpunct{,\,}}
-\newcommand{\ty}{\mathrel{::}}
-\newcommand{\asn}{\mathrel{:=}}
-\newcommand{\more}{\ldots}
-\newcommand{\record}[1]{\lparr #1 \rparr}
-\newcommand{\dtt}{\mathord.}
-
-\newcommand\lbrakk{\mathopen{[\![}}
-\newcommand\rbrakk{\mathclose{]\!]}}
-\newcommand\List[1]{\lbrakk#1\rbrakk}  %was \obj
-\newcommand\vpile[1]{\begin{array}{c}#1\end{array}}
-\newenvironment{matharray}[1]{\[\begin{array}{#1}}{\end{array}\]}
-\newcommand{\Text}[1]{\mbox{#1}}
-
-\DeclareMathSymbol{\dshsym}{\mathalpha}{letters}{"2D}
-\newcommand{\dsh}{\mathit{\dshsym}}
-
-\let\int=\cap
-\let\un=\cup
-\let\inter=\bigcap
-\let\union=\bigcup
-
-\def\ML{{\sc ml}}
-\def\AST{{\sc ast}}
-
-%macros to change the treatment of symbols
-\def\relsemicolon{\mathcode`\;="303B}   %treat ; like a relation
-\def\binperiod{\mathcode`\.="213A}   %treat . like a binary operator
-\def\binvert{\mathcode`\|="226A}     %treat | like a binary operator
-
-%redefinition of \sloppy and \fussy to use \emergencystretch
-\def\sloppy{\tolerance2000 \hfuzz.5pt \vfuzz.5pt \emergencystretch=15pt}
-\def\fussy{\tolerance200 \hfuzz.1pt \vfuzz.1pt \emergencystretch=0pt}
-
-%non-bf version of description
-\def\descrlabel#1{\hspace\labelsep #1}
-\def\descr{\list{}{\labelwidth\z@ \itemindent-\leftmargin\let\makelabel\descrlabel}}
-\let\enddescr\endlist
-
-% The mathcodes for the letters A, ..., Z, a, ..., z are changed to
-% generate text italic rather than math italic by default. This makes
-% multi-letter identifiers look better. The mathcode for character c
-% is set to |"7000| (variable family) + |"400| (text italic) + |c|.
-%
-\DeclareSymbolFont{italics}{\encodingdefault}{\rmdefault}{m}{it}%
-\def\@setmcodes#1#2#3{{\count0=#1 \count1=#3
-        \loop \global\mathcode\count0=\count1 \ifnum \count0<#2
-        \advance\count0 by1 \advance\count1 by1 \repeat}}
-\@setmcodes{`A}{`Z}{"7\hexnumber@\symitalics41}
-\@setmcodes{`a}{`z}{"7\hexnumber@\symitalics61}
-
-%%% \dquotes permits usage of "..." for \hbox{...} 
-%%%   also taken from under.sty
-{\catcode`\"=\active
-\gdef\dquotes{\catcode`\"=\active  \let"=\@mathText}%
-\gdef\@mathText#1"{\hbox{\mathTextFont #1\/}}}
-\def\mathTextFont{\frenchspacing\tt}
-\def\dquotesoff{\catcode`\"=\other}
\ No newline at end of file