Got rid of obsolete "goal" commands.
author paulson
Thu, 16 Jul 1998 11:50:01 +0200
changeset 5151 1e944fe5ce96
parent 5150 6e2e9b92c301
child 5152 5b63f591678b
Got rid of obsolete "goal" commands. Also inserted spaces after all periods
doc-src/Logics/CTT-eg.txt
doc-src/Logics/CTT.tex
doc-src/Logics/FOL-eg.txt
doc-src/Logics/FOL.tex
doc-src/Logics/HOL.tex
doc-src/Logics/LK-eg.txt
doc-src/Logics/LK.tex
doc-src/Logics/ZF-eg.txt
doc-src/Logics/ZF.tex
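
The change is mechanical throughout: the obsolete goal and goalw commands, which take an explicit theory argument, are replaced by Goal and Goalw, which prove the stated goal in the current theory context, set beforehand with the context command. A minimal sketch of the two styles (the one-line theorem and its two-step proof are illustrative only, not taken from the patched files):

    (* old style: the theory is named explicitly in each goal command *)
    goal IFOL.thy "P --> P";
    by (resolve_tac [impI] 1);
    by (assume_tac 1);

    (* new style: set the current theory once, then state goals with Goal *)
    context IFOL.thy;
    Goal "P --> P";
    by (resolve_tac [impI] 1);
    by (assume_tac 1);
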
--- a/doc-src/Logics/CTT-eg.txt	Thu Jul 16 10:35:31 1998 +0200
+++ b/doc-src/Logics/CTT-eg.txt	Thu Jul 16 11:50:01 1998 +0200
@@ -6,7 +6,7 @@
 
 (*** Type inference, from CTT/ex/typechk.ML ***)
 
-goal CTT.thy "lam n. rec(n, 0, %x y.x) : ?A";
+Goal "lam n. rec(n, 0, %x y. x) : ?A";
 by (resolve_tac [ProdI] 1);
 by (eresolve_tac [NE] 2);
 by (resolve_tac [NI0] 2);
@@ -17,7 +17,7 @@
 
 (*** Logical reasoning, from CTT/ex/elim.ML ***)
 
-val prems = goal CTT.thy
+val prems = Goal
     "[| A type;  \
 \       !!x. x:A ==> B(x) type;       \
 \       !!x. x:A ==> C(x) type;       \
@@ -35,10 +35,10 @@
 
 (*** Currying, from CTT/ex/elim.ML ***)
 
-val prems = goal CTT.thy
-    "[| A type; !!x. x:A ==> B(x) type; 			\
-\               !!z. z: (SUM x:A. B(x)) ==> C(z) type 		\
-\    |] ==> ?a : PROD f: (PROD z : (SUM x:A . B(x)) . C(z)).	\
+val prems = Goal
+    "[| A type; !!x. x:A ==> B(x) type;                         \
+\               !!z. z: (SUM x:A. B(x)) ==> C(z) type           \
+\    |] ==> ?a : PROD f: (PROD z : (SUM x:A . B(x)) . C(z)).    \
 \                     (PROD x:A . PROD y:B(x) . C(<x,y>))";
 by (intr_tac prems);
 by (eresolve_tac [ProdE] 1);
@@ -47,7 +47,7 @@
 
 (*** Axiom of Choice ***)
 
-val prems = goal CTT.thy   
+val prems = Goal   
     "[| A type;  !!x. x:A ==> B(x) type;  \
 \       !!x y.[| x:A;  y:B(x) |] ==> C(x,y) type  \
 \    |] ==> ?a : PROD h: (PROD x:A. SUM y:B(x). C(x,y)).    \
@@ -65,39 +65,11 @@
 
 
 
-> goal CTT.thy "lam n. rec(n, 0, %x y.x) : ?A";
-Level 0
-lam n. rec(n,0,%x y. x) : ?A
- 1. lam n. rec(n,0,%x y. x) : ?A
-> by (resolve_tac [ProdI] 1);
-Level 1
-lam n. rec(n,0,%x y. x) : PROD x:?A1. ?B1(x)
- 1. ?A1 type
- 2. !!n. n : ?A1 ==> rec(n,0,%x y. x) : ?B1(n)
-> by (eresolve_tac [NE] 2);
-Level 2
-lam n. rec(n,0,%x y. x) : PROD x:N. ?C2(x,x)
- 1. N type
- 2. !!n. 0 : ?C2(n,0)
- 3. !!n x y. [| x : N; y : ?C2(n,x) |] ==> x : ?C2(n,succ(x))
-> by (resolve_tac [NI0] 2);
-Level 3
-lam n. rec(n,0,%x y. x) : N --> N
- 1. N type
- 2. !!n x y. [| x : N; y : N |] ==> x : N
-> by (assume_tac 2);
-Level 4
-lam n. rec(n,0,%x y. x) : N --> N
- 1. N type
-> by (resolve_tac [NF] 1);
-Level 5
-lam n. rec(n,0,%x y. x) : N --> N
-No subgoals!
+
+STOP_HERE;
 
 
-
-
-> val prems = goal CTT.thy
+> val prems = Goal
 #     "[| A type;  \
 # \       !!x. x:A ==> B(x) type;       \
 # \       !!x. x:A ==> C(x) type;       \
@@ -174,7 +146,7 @@
 
 
 
-> val prems = goal CTT.thy
+> val prems = Goal
 #     "[| A type; !!x. x:A ==> B(x) type; \
 # \               !!z. z: (SUM x:A. B(x)) ==> C(z) type |] \
 # \    ==> ?a : (PROD z : (SUM x:A . B(x)) . C(z)) \
@@ -204,7 +176,7 @@
 
 
 
-> val prems = goal CTT.thy
+> val prems = Goal
 #     "[| A type;  !!x. x:A ==> B(x) type;  \
 # \       !!x y.[| x:A;  y:B(x) |] ==> C(x,y) type  \
 # \    |] ==> ?a :    (PROD x:A. SUM y:B(x). C(x,y))    \
--- a/doc-src/Logics/CTT.tex	Thu Jul 16 10:35:31 1998 +0200
+++ b/doc-src/Logics/CTT.tex	Thu Jul 16 11:50:01 1998 +0200
@@ -126,13 +126,13 @@
 \begin{center} \tt\frenchspacing
 \begin{tabular}{rrr} 
   \it external                  & \it internal  & \it standard notation \\ 
-  \sdx{PROD} $x$:$A$ . $B[x]$   &  Prod($A$, $\lambda x.B[x]$) &
+  \sdx{PROD} $x$:$A$ . $B[x]$   &  Prod($A$, $\lambda x. B[x]$) &
         \rm product $\prod@{x\in A}B[x]$ \\
-  \sdx{SUM} $x$:$A$ . $B[x]$    & Sum($A$, $\lambda x.B[x]$) &
+  \sdx{SUM} $x$:$A$ . $B[x]$    & Sum($A$, $\lambda x. B[x]$) &
         \rm sum $\sum@{x\in A}B[x]$ \\
-  $A$ --> $B$     &  Prod($A$, $\lambda x.B$) &
+  $A$ --> $B$     &  Prod($A$, $\lambda x. B$) &
         \rm function space $A\to B$ \\
-  $A$ * $B$       &  Sum($A$, $\lambda x.B$)  &
+  $A$ * $B$       &  Sum($A$, $\lambda x. B$)  &
         \rm binary product $A\times B$
 \end{tabular}
 \end{center}
@@ -169,7 +169,7 @@
 the function application operator (sometimes called `apply'), and the
 2-place type operators.  Note that meta-level abstraction and application,
 $\lambda x.b$ and $f(a)$, differ from object-level abstraction and
-application, \hbox{\tt lam $x$.$b$} and $b{\tt`}a$.  A {\CTT}
+application, \hbox{\tt lam $x$. $b$} and $b{\tt`}a$.  A {\CTT}
 function~$f$ is simply an individual as far as Isabelle is concerned: its
 Isabelle type is~$i$, not say $i\To i$.
 
@@ -180,8 +180,8 @@
 \index{*SUM symbol}\index{*PROD symbol}
 Quantification is expressed using general sums $\sum@{x\in A}B[x]$ and
 products $\prod@{x\in A}B[x]$.  Instead of {\tt Sum($A$,$B$)} and {\tt
-  Prod($A$,$B$)} we may write \hbox{\tt SUM $x$:$A$.$B[x]$} and \hbox{\tt
-  PROD $x$:$A$.$B[x]$}.  For example, we may write
+  Prod($A$,$B$)} we may write \hbox{\tt SUM $x$:$A$. $B[x]$} and \hbox{\tt
+  PROD $x$:$A$. $B[x]$}.  For example, we may write
 \begin{ttbox}
 SUM y:B. PROD x:A. C(x,y)   {\rm for}   Sum(B, \%y. Prod(A, \%x. C(x,y)))
 \end{ttbox}
@@ -232,20 +232,20 @@
 
 \tdx{NE}        [| p: N;  a: C(0);  
              !!u v. [| u: N; v: C(u) |] ==> b(u,v): C(succ(u)) 
-          |] ==> rec(p, a, \%u v.b(u,v)) : C(p)
+          |] ==> rec(p, a, \%u v. b(u,v)) : C(p)
 
 \tdx{NEL}       [| p = q : N;  a = c : C(0);  
              !!u v. [| u: N; v: C(u) |] ==> b(u,v)=d(u,v): C(succ(u))
-          |] ==> rec(p, a, \%u v.b(u,v)) = rec(q,c,d) : C(p)
+          |] ==> rec(p, a, \%u v. b(u,v)) = rec(q,c,d) : C(p)
 
 \tdx{NC0}       [| a: C(0);  
              !!u v. [| u: N; v: C(u) |] ==> b(u,v): C(succ(u))
-          |] ==> rec(0, a, \%u v.b(u,v)) = a : C(0)
+          |] ==> rec(0, a, \%u v. b(u,v)) = a : C(0)
 
 \tdx{NC_succ}   [| p: N;  a: C(0);  
              !!u v. [| u: N; v: C(u) |] ==> b(u,v): C(succ(u)) 
-          |] ==> rec(succ(p), a, \%u v.b(u,v)) =
-                 b(p, rec(p, a, \%u v.b(u,v))) : C(succ(p))
+          |] ==> rec(succ(p), a, \%u v. b(u,v)) =
+                 b(p, rec(p, a, \%u v. b(u,v))) : C(succ(p))
 
 \tdx{zero_ne_succ}      [| a: N;  0 = succ(a) : N |] ==> 0: F
 \end{ttbox}
@@ -255,22 +255,22 @@
 
 \begin{figure} 
 \begin{ttbox}
-\tdx{ProdF}     [| A type; !!x. x:A ==> B(x) type |] ==> PROD x:A.B(x) type
+\tdx{ProdF}     [| A type; !!x. x:A ==> B(x) type |] ==> PROD x:A. B(x) type
 \tdx{ProdFL}    [| A = C;  !!x. x:A ==> B(x) = D(x) |] ==> 
-          PROD x:A.B(x) = PROD x:C.D(x)
+          PROD x:A. B(x) = PROD x:C. D(x)
 
 \tdx{ProdI}     [| A type;  !!x. x:A ==> b(x):B(x)
-          |] ==> lam x.b(x) : PROD x:A.B(x)
+          |] ==> lam x. b(x) : PROD x:A. B(x)
 \tdx{ProdIL}    [| A type;  !!x. x:A ==> b(x) = c(x) : B(x)
-          |] ==> lam x.b(x) = lam x.c(x) : PROD x:A.B(x)
+          |] ==> lam x. b(x) = lam x. c(x) : PROD x:A. B(x)
 
-\tdx{ProdE}     [| p : PROD x:A.B(x);  a : A |] ==> p`a : B(a)
-\tdx{ProdEL}    [| p=q: PROD x:A.B(x);  a=b : A |] ==> p`a = q`b : B(a)
+\tdx{ProdE}     [| p : PROD x:A. B(x);  a : A |] ==> p`a : B(a)
+\tdx{ProdEL}    [| p=q: PROD x:A. B(x);  a=b : A |] ==> p`a = q`b : B(a)
 
 \tdx{ProdC}     [| a : A;  !!x. x:A ==> b(x) : B(x)
-          |] ==> (lam x.b(x)) ` a = b(a) : B(a)
+          |] ==> (lam x. b(x)) ` a = b(a) : B(a)
 
-\tdx{ProdC2}    p : PROD x:A.B(x) ==> (lam x. p`x) = p : PROD x:A.B(x)
+\tdx{ProdC2}    p : PROD x:A. B(x) ==> (lam x. p`x) = p : PROD x:A. B(x)
 \end{ttbox}
 \caption{Rules for the product type $\prod\sb{x\in A}B[x]$} \label{ctt-prod}
 \end{figure}
@@ -278,27 +278,27 @@
 
 \begin{figure} 
 \begin{ttbox}
-\tdx{SumF}      [| A type;  !!x. x:A ==> B(x) type |] ==> SUM x:A.B(x) type
+\tdx{SumF}      [| A type;  !!x. x:A ==> B(x) type |] ==> SUM x:A. B(x) type
 \tdx{SumFL}     [| A = C;  !!x. x:A ==> B(x) = D(x) 
-          |] ==> SUM x:A.B(x) = SUM x:C.D(x)
+          |] ==> SUM x:A. B(x) = SUM x:C. D(x)
 
-\tdx{SumI}      [| a : A;  b : B(a) |] ==> <a,b> : SUM x:A.B(x)
-\tdx{SumIL}     [| a=c:A;  b=d:B(a) |] ==> <a,b> = <c,d> : SUM x:A.B(x)
+\tdx{SumI}      [| a : A;  b : B(a) |] ==> <a,b> : SUM x:A. B(x)
+\tdx{SumIL}     [| a=c:A;  b=d:B(a) |] ==> <a,b> = <c,d> : SUM x:A. B(x)
 
-\tdx{SumE}      [| p: SUM x:A.B(x);  
+\tdx{SumE}      [| p: SUM x:A. B(x);  
              !!x y. [| x:A; y:B(x) |] ==> c(x,y): C(<x,y>) 
-          |] ==> split(p, \%x y.c(x,y)) : C(p)
+          |] ==> split(p, \%x y. c(x,y)) : C(p)
 
-\tdx{SumEL}     [| p=q : SUM x:A.B(x); 
+\tdx{SumEL}     [| p=q : SUM x:A. B(x); 
              !!x y. [| x:A; y:B(x) |] ==> c(x,y)=d(x,y): C(<x,y>)
-          |] ==> split(p, \%x y.c(x,y)) = split(q, \%x y.d(x,y)) : C(p)
+          |] ==> split(p, \%x y. c(x,y)) = split(q, \%x y. d(x,y)) : C(p)
 
 \tdx{SumC}      [| a: A;  b: B(a);
              !!x y. [| x:A; y:B(x) |] ==> c(x,y): C(<x,y>)
-          |] ==> split(<a,b>, \%x y.c(x,y)) = c(a,b) : C(<a,b>)
+          |] ==> split(<a,b>, \%x y. c(x,y)) = c(a,b) : C(<a,b>)
 
-\tdx{fst_def}   fst(a) == split(a, \%x y.x)
-\tdx{snd_def}   snd(a) == split(a, \%x y.y)
+\tdx{fst_def}   fst(a) == split(a, \%x y. x)
+\tdx{snd_def}   snd(a) == split(a, \%x y. y)
 \end{ttbox}
 \caption{Rules for the sum type $\sum\sb{x\in A}B[x]$} \label{ctt-sum}
 \end{figure}
@@ -318,23 +318,23 @@
 \tdx{PlusE}     [| p: A+B;
              !!x. x:A ==> c(x): C(inl(x));  
              !!y. y:B ==> d(y): C(inr(y))
-          |] ==> when(p, \%x.c(x), \%y.d(y)) : C(p)
+          |] ==> when(p, \%x. c(x), \%y. d(y)) : C(p)
 
 \tdx{PlusEL}    [| p = q : A+B;
              !!x. x: A ==> c(x) = e(x) : C(inl(x));   
              !!y. y: B ==> d(y) = f(y) : C(inr(y))
-          |] ==> when(p, \%x.c(x), \%y.d(y)) = 
-                 when(q, \%x.e(x), \%y.f(y)) : C(p)
+          |] ==> when(p, \%x. c(x), \%y. d(y)) = 
+                 when(q, \%x. e(x), \%y. f(y)) : C(p)
 
 \tdx{PlusC_inl} [| a: A;
              !!x. x:A ==> c(x): C(inl(x));  
              !!y. y:B ==> d(y): C(inr(y))
-          |] ==> when(inl(a), \%x.c(x), \%y.d(y)) = c(a) : C(inl(a))
+          |] ==> when(inl(a), \%x. c(x), \%y. d(y)) = c(a) : C(inl(a))
 
 \tdx{PlusC_inr} [| b: B;
              !!x. x:A ==> c(x): C(inl(x));  
              !!y. y:B ==> d(y): C(inr(y))
-          |] ==> when(inr(b), \%x.c(x), \%y.d(y)) = d(b) : C(inr(b))
+          |] ==> when(inr(b), \%x. c(x), \%y. d(y)) = d(b) : C(inr(b))
 \end{ttbox}
 \caption{Rules for the binary sum type $A+B$} \label{ctt-plus}
 \end{figure}
@@ -458,7 +458,7 @@
 proof of the Axiom of Choice.
 
 All the rules are given in $\eta$-expanded form.  For instance, every
-occurrence of $\lambda u\,v.b(u,v)$ could be abbreviated to~$b$ in the
+occurrence of $\lambda u\,v. b(u,v)$ could be abbreviated to~$b$ in the
 rules for~$N$.  The expanded form permits Isabelle to preserve bound
 variable names during backward proof.  Names of bound variables in the
 conclusion (here, $u$ and~$v$) are matched with corresponding bound
@@ -658,16 +658,16 @@
 \end{constants}
 
 \begin{ttbox}
-\tdx{add_def}           a#+b  == rec(a, b, \%u v.succ(v))  
-\tdx{diff_def}          a-b   == rec(b, a, \%u v.rec(v, 0, \%x y.x))  
+\tdx{add_def}           a#+b  == rec(a, b, \%u v. succ(v))  
+\tdx{diff_def}          a-b   == rec(b, a, \%u v. rec(v, 0, \%x y. x))  
 \tdx{absdiff_def}       a|-|b == (a-b) #+ (b-a)  
 \tdx{mult_def}          a#*b  == rec(a, 0, \%u v. b #+ v)  
 
 \tdx{mod_def}           a mod b ==
-                  rec(a, 0, \%u v. rec(succ(v) |-| b, 0, \%x y.succ(v)))
+                  rec(a, 0, \%u v. rec(succ(v) |-| b, 0, \%x y. succ(v)))
 
 \tdx{div_def}           a div b ==
-                  rec(a, 0, \%u v. rec(succ(u) mod b, succ(v), \%x y.v))
+                  rec(a, 0, \%u v. rec(succ(u) mod b, succ(v), \%x y. v))
 
 \tdx{add_typing}        [| a:N;  b:N |] ==> a #+ b : N
 \tdx{addC0}             b:N ==> 0 #+ b = b : N
@@ -714,7 +714,7 @@
 recursion, some of their definitions may be obscure.  
 
 The difference~$a-b$ is computed by taking $b$ predecessors of~$a$, where
-the predecessor function is $\lambda v. {\tt rec}(v, 0, \lambda x\,y.x)$.
+the predecessor function is $\lambda v. {\tt rec}(v, 0, \lambda x\,y. x)$.
 
 The remainder $a\bmod b$ counts up to~$a$ in a cyclic fashion, using 0
 as the successor of~$b-1$.  Absolute difference is used to test the
@@ -751,7 +751,7 @@
 unknown, takes shape in the course of the proof.  Our example is the
 predecessor function on the natural numbers.
 \begin{ttbox}
-goal CTT.thy "lam n. rec(n, 0, \%x y.x) : ?A";
+Goal "lam n. rec(n, 0, \%x y. x) : ?A";
 {\out Level 0}
 {\out lam n. rec(n,0,\%x y. x) : ?A}
 {\out  1. lam n. rec(n,0,\%x y. x) : ?A}
@@ -813,7 +813,7 @@
 unprovable subgoals will be left.  As an exercise, try to prove the
 following invalid goal:
 \begin{ttbox}
-goal CTT.thy "lam n. rec(n, 0, \%x y.tt) : ?A";
+Goal "lam n. rec(n, 0, \%x y. tt) : ?A";
 \end{ttbox}
 
 
@@ -843,7 +843,7 @@
 To begin, we bind the rule's premises --- returned by the~{\tt goal}
 command --- to the {\ML} variable~{\tt prems}.
 \begin{ttbox}
-val prems = goal CTT.thy
+val prems = Goal
     "[| A type;                       \ttback
 \ttback       !!x. x:A ==> B(x) type;       \ttback
 \ttback       !!x. x:A ==> C(x) type;       \ttback
@@ -994,7 +994,7 @@
 called~$f$; Isabelle echoes the type using \verb|-->| because there is no
 explicit dependence upon~$f$.
 \begin{ttbox}
-val prems = goal CTT.thy
+val prems = Goal
     "[| A type; !!x. x:A ==> B(x) type;                           \ttback
 \ttback               !!z. z: (SUM x:A. B(x)) ==> C(z) type             \ttback
 \ttback    |] ==> ?a : PROD f: (PROD z : (SUM x:A . B(x)) . C(z)).      \ttback
@@ -1074,7 +1074,7 @@
 (recall Fig.\ts\ref{ctt-derived}) and the type checking tactics, we can
 prove the theorem in nine steps.
 \begin{ttbox}
-val prems = goal CTT.thy
+val prems = Goal
     "[| A type;  !!x. x:A ==> B(x) type;                    \ttback
 \ttback       !!x y.[| x:A;  y:B(x) |] ==> C(x,y) type            \ttback
 \ttback    |] ==> ?a : PROD h: (PROD x:A. SUM y:B(x). C(x,y)).    \ttback
--- a/doc-src/Logics/FOL-eg.txt	Thu Jul 16 10:35:31 1998 +0200
+++ b/doc-src/Logics/FOL-eg.txt	Thu Jul 16 11:50:01 1998 +0200
@@ -5,8 +5,10 @@
 
 (*** Intuitionistic examples ***)
 
+context IFOL.thy;
+
 (*Quantifier example from Logic&Computation*)
-goal Int_Rule.thy "(EX y. ALL x. Q(x,y)) -->  (ALL x. EX y. Q(x,y))";
+Goal "(EX y. ALL x. Q(x,y)) -->  (ALL x. EX y. Q(x,y))";
 by (resolve_tac [impI] 1);
 by (resolve_tac [allI] 1);
 by (resolve_tac [exI] 1);
@@ -19,7 +21,7 @@
 
 
 (*Example of Dyckhoff's method*)
-goalw Int_Rule.thy [not_def] "~ ~ ((P-->Q) | (Q-->P))";
+Goalw [not_def] "~ ~ ((P-->Q) | (Q-->P))";
 by (resolve_tac [impI] 1);
 by (eresolve_tac [disj_impE] 1);
 by (eresolve_tac [imp_impE] 1);
@@ -29,87 +31,13 @@
 
 
 
-- goal Int_Rule.thy "(EX ay. ALL x. Q(x,y)) -->  (ALL x. EX y. Q(x,y))";
-Level 0
-(EX y. ALL x. Q(x,y)) --> (ALL x. EX y. Q(x,y))
- 1. (EX y. ALL x. Q(x,y)) --> (ALL x. EX y. Q(x,y))
-- by (resolve_tac [impI] 1);
-Level 1
-(EX y. ALL x. Q(x,y)) --> (ALL x. EX y. Q(x,y))
- 1. EX y. ALL x. Q(x,y) ==> ALL x. EX y. Q(x,y)
-- by (resolve_tac [allI] 1);
-Level 2
-(EX y. ALL x. Q(x,y)) --> (ALL x. EX y. Q(x,y))
- 1. !!x. EX y. ALL x. Q(x,y) ==> EX y. Q(x,y)
-- by (resolve_tac [exI] 1);
-Level 3
-(EX y. ALL x. Q(x,y)) --> (ALL x. EX y. Q(x,y))
- 1. !!x. EX y. ALL x. Q(x,y) ==> Q(x,?y2(x))
-- by (eresolve_tac [exE] 1);
-Level 4
-(EX y. ALL x. Q(x,y)) --> (ALL x. EX y. Q(x,y))
- 1. !!x y. ALL x. Q(x,y) ==> Q(x,?y2(x))
-- choplev 2;
-Level 2
-(EX y. ALL x. Q(x,y)) --> (ALL x. EX y. Q(x,y))
- 1. !!x. EX y. ALL x. Q(x,y) ==> EX y. Q(x,y)
-- by (eresolve_tac [exE] 1);
-Level 3
-(EX y. ALL x. Q(x,y)) --> (ALL x. EX y. Q(x,y))
- 1. !!x y. ALL x. Q(x,y) ==> EX y. Q(x,y)
-- by (resolve_tac [exI] 1);
-Level 4
-(EX y. ALL x. Q(x,y)) --> (ALL x. EX y. Q(x,y))
- 1. !!x y. ALL x. Q(x,y) ==> Q(x,?y3(x,y))
-- by (eresolve_tac [allE] 1);
-Level 5
-(EX y. ALL x. Q(x,y)) --> (ALL x. EX y. Q(x,y))
- 1. !!x y. Q(?x4(x,y),y) ==> Q(x,?y3(x,y))
-- by (assume_tac 1);
-Level 6
-(EX y. ALL x. Q(x,y)) --> (ALL x. EX y. Q(x,y))
-No subgoals!
-
-
-
-> goalw Int_Rule.thy [not_def] "~ ~ ((P-->Q) | (Q-->P))";
-Level 0
-~ ~ ((P --> Q) | (Q --> P))
- 1. ((P --> Q) | (Q --> P) --> False) --> False
-> by (resolve_tac [impI] 1);
-Level 1
-~ ~ ((P --> Q) | (Q --> P))
- 1. (P --> Q) | (Q --> P) --> False ==> False
-> by (eresolve_tac [disj_impE] 1);
-Level 2
-~ ~ ((P --> Q) | (Q --> P))
- 1. [| (P --> Q) --> False; (Q --> P) --> False |] ==> False
-> by (eresolve_tac [imp_impE] 1);
-Level 3
-~ ~ ((P --> Q) | (Q --> P))
- 1. [| (Q --> P) --> False; P; Q --> False |] ==> Q
- 2. [| (Q --> P) --> False; False |] ==> False
-> by (eresolve_tac [imp_impE] 1);
-Level 4
-~ ~ ((P --> Q) | (Q --> P))
- 1. [| P; Q --> False; Q; P --> False |] ==> P
- 2. [| P; Q --> False; False |] ==> Q
- 3. [| (Q --> P) --> False; False |] ==> False
-> by (REPEAT (eresolve_tac [FalseE] 2));
-Level 5
-~ ~ ((P --> Q) | (Q --> P))
- 1. [| P; Q --> False; Q; P --> False |] ==> P
-> by (assume_tac 1);
-Level 6
-~ ~ ((P --> Q) | (Q --> P))
-No subgoals!
-
-
 
 
 (*** Classical examples ***)
 
-goal cla_thy "EX y. ALL x. P(y)-->P(x)";
+context FOL.thy;
+
+Goal "EX y. ALL x. P(y)-->P(x)";
 by (resolve_tac [exCI] 1);
 by (resolve_tac [allI] 1);
 by (resolve_tac [impI] 1);
@@ -119,12 +47,12 @@
 by (resolve_tac [impI] 1);
 by (eresolve_tac [notE] 1);
 by (assume_tac 1);
-goal cla_thy "EX y. ALL x. P(y)-->P(x)";
-by (best_tac FOL_dup_cs 1);
+Goal "EX y. ALL x. P(y)-->P(x)";
+by (Blast_tac 1);
 
 
 
-- goal cla_thy "EX y. ALL x. P(y)-->P(x)";
+- Goal "EX y. ALL x. P(y)-->P(x)";
 Level 0
 EX y. ALL x. P(y) --> P(x)
  1. EX y. ALL x. P(y) --> P(x)
@@ -162,7 +90,7 @@
 Level 8
 EX y. ALL x. P(y) --> P(x)
 No subgoals!
-- goal cla_thy "EX y. ALL x. P(y)-->P(x)";
+- Goal "EX y. ALL x. P(y)-->P(x)";
 Level 0
 EX y. ALL x. P(y) --> P(x)
  1. EX y. ALL x. P(y) --> P(x)
--- a/doc-src/Logics/FOL.tex	Thu Jul 16 10:35:31 1998 +0200
+++ b/doc-src/Logics/FOL.tex	Thu Jul 16 11:50:01 1998 +0200
@@ -7,16 +7,16 @@
 \thydx{IFOL}.  Classical logic, theory \thydx{FOL}, is
 obtained by adding the double negation rule.  Basic proof procedures are
 provided.  The intuitionistic prover works with derived rules to simplify
-implications in the assumptions.  Classical~{\tt FOL} employs Isabelle's
+implications in the assumptions.  Classical~\texttt{FOL} employs Isabelle's
 classical reasoner, which simulates a sequent calculus.
 
 \section{Syntax and rules of inference}
 The logic is many-sorted, using Isabelle's type classes.  The class of
-first-order terms is called \cldx{term} and is a subclass of {\tt logic}.
+first-order terms is called \cldx{term} and is a subclass of \texttt{logic}.
 No types of individuals are provided, but extensions can define types such
-as {\tt nat::term} and type constructors such as {\tt list::(term)term}
-(see the examples directory, {\tt FOL/ex}).  Below, the type variable
-$\alpha$ ranges over class {\tt term}; the equality symbol and quantifiers
+as \texttt{nat::term} and type constructors such as \texttt{list::(term)term}
+(see the examples directory, \texttt{FOL/ex}).  Below, the type variable
+$\alpha$ ranges over class \texttt{term}; the equality symbol and quantifiers
 are polymorphic (many-sorted).  The type of formulae is~\tydx{o}, which
 belongs to class~\cldx{logic}.  Figure~\ref{fol-syntax} gives the syntax.
 Note that $a$\verb|~=|$b$ is translated to $\neg(a=b)$.
@@ -45,8 +45,8 @@
 conj_impE}, etc., support the intuitionistic proof procedure
 (see~\S\ref{fol-int-prover}).
 
-See the files {\tt FOL/IFOL.thy}, {\tt FOL/IFOL.ML} and
-{\tt FOL/intprover.ML} for complete listings of the rules and
+See the files \texttt{FOL/IFOL.thy}, \texttt{FOL/IFOL.ML} and
+\texttt{FOL/intprover.ML} for complete listings of the rules and
 derived rules.
 
 \begin{figure} 
@@ -68,7 +68,7 @@
         universal quantifier ($\forall$) \\
   \sdx{EX}   & \cdx{Ex}   & $(\alpha\To o)\To o$ & 10 & 
         existential quantifier ($\exists$) \\
-  {\tt EX!}  & \cdx{Ex1}  & $(\alpha\To o)\To o$ & 10 & 
+  \texttt{EX!}  & \cdx{Ex1}  & $(\alpha\To o)\To o$ & 10 & 
         unique existence ($\exists!$)
 \end{tabular}
 \index{*"E"X"! symbol}
@@ -108,7 +108,7 @@
   \end{array}
 \]
 \subcaption{Grammar}
-\caption{Syntax of {\tt FOL}} \label{fol-syntax}
+\caption{Syntax of \texttt{FOL}} \label{fol-syntax}
 \end{figure}
 
 
@@ -197,12 +197,12 @@
 \begin{itemize}
 \item 
 It instantiates the simplifier.  Both equality ($=$) and the biconditional
-($\bimp$) may be used for rewriting.  Tactics such as {\tt Asm_simp_tac} and
-{\tt Full_simp_tac} refer to the default simpset ({\tt simpset()}), which works for
+($\bimp$) may be used for rewriting.  Tactics such as \texttt{Asm_simp_tac} and
+\texttt{Full_simp_tac} refer to the default simpset (\texttt{simpset()}), which works for
 most purposes.  Named simplification sets include \ttindexbold{IFOL_ss},
 for intuitionistic first-order logic, and \ttindexbold{FOL_ss},
 for classical logic.  See the file
-{\tt FOL/simpdata.ML} for a complete listing of the simplification
+\texttt{FOL/simpdata.ML} for a complete listing of the simplification
 rules%
 \iflabelundefined{sec:setting-up-simp}{}%
         {, and \S\ref{sec:setting-up-simp} for discussion}.
@@ -225,7 +225,7 @@
 
 
 \section{Intuitionistic proof procedures} \label{fol-int-prover}
-Implication elimination (the rules~{\tt mp} and~{\tt impE}) pose
+Implication elimination (the rules~\texttt{mp} and~\texttt{impE}) pose
 difficulties for automated proof.  In intuitionistic logic, the assumption
 $P\imp Q$ cannot be treated like $\neg P\disj Q$.  Given $P\imp Q$, we may
 use~$Q$ provided we can prove~$P$; the proof of~$P$ may require repeated
@@ -239,7 +239,7 @@
 \[ \infer[({\imp}E)]{Q}{P\imp Q &
        \infer[({\imp}E)]{P}{(P\imp Q)\imp P & P\imp Q}} 
 \]
-The theorem prover for intuitionistic logic does not use~{\tt impE}.\@
+The theorem prover for intuitionistic logic does not use~\texttt{impE}.\@
 Instead, it simplifies implications using derived rules
 (Fig.\ts\ref{fol-int-derived}).  It reduces the antecedents of implications
 to atoms and then uses Modus Ponens: from $P\imp Q$ and~$P$ deduce~$Q$.
@@ -264,7 +264,7 @@
 IntPr.fast_tac      : int -> tactic
 IntPr.best_tac      : int -> tactic
 \end{ttbox}
-Most of these belong to the structure {\tt IntPr} and resemble the
+Most of these belong to the structure \texttt{IntPr} and resemble the
 tactics of Isabelle's classical reasoner.
 
 \begin{ttdescription}
@@ -277,30 +277,30 @@
 produce multiple outcomes, enumerating all suitable pairs of assumptions.
 
 \item[\ttindexbold{eq_mp_tac} {\it i}] 
-is like {\tt mp_tac} {\it i}, but may not instantiate unknowns --- thus, it
+is like \texttt{mp_tac} {\it i}, but may not instantiate unknowns --- thus, it
 is safe.
 
 \item[\ttindexbold{IntPr.safe_step_tac} $i$] performs a safe step on
 subgoal~$i$.  This may include proof by assumption or Modus Ponens (taking
-care not to instantiate unknowns), or {\tt hyp_subst_tac}. 
+care not to instantiate unknowns), or \texttt{hyp_subst_tac}. 
 
 \item[\ttindexbold{IntPr.safe_tac}] repeatedly performs safe steps on all 
 subgoals.  It is deterministic, with at most one outcome.
 
-\item[\ttindexbold{IntPr.inst_step_tac} $i$] is like {\tt safe_step_tac},
+\item[\ttindexbold{IntPr.inst_step_tac} $i$] is like \texttt{safe_step_tac},
 but allows unknowns to be instantiated.
 
-\item[\ttindexbold{IntPr.step_tac} $i$] tries {\tt safe_tac} or {\tt
+\item[\ttindexbold{IntPr.step_tac} $i$] tries \texttt{safe_tac} or {\tt
     inst_step_tac}, or applies an unsafe rule.  This is the basic step of
   the intuitionistic proof procedure.
 
-\item[\ttindexbold{IntPr.fast_tac} $i$] applies {\tt step_tac}, using
+\item[\ttindexbold{IntPr.fast_tac} $i$] applies \texttt{step_tac}, using
 depth-first search, to solve subgoal~$i$.
 
-\item[\ttindexbold{IntPr.best_tac} $i$] applies {\tt step_tac}, using
+\item[\ttindexbold{IntPr.best_tac} $i$] applies \texttt{step_tac}, using
 best-first search (guided by the size of the proof state) to solve subgoal~$i$.
 \end{ttdescription}
-Here are some of the theorems that {\tt IntPr.fast_tac} proves
+Here are some of the theorems that \texttt{IntPr.fast_tac} proves
 automatically.  The latter three date from {\it Principia Mathematica}
 (*11.53, *11.55, *11.61)~\cite{principia}.
 \begin{ttbox}
@@ -337,11 +337,11 @@
 well as classical elimination rules for~$\imp$ and~$\bimp$, and the swap
 rule (see Fig.\ts\ref{fol-cla-derived}).
 
-The classical reasoner is installed.  Tactics such as {\tt Blast_tac} and {\tt
-Best_tac} refer to the default claset ({\tt claset()}), which works for most
+The classical reasoner is installed.  Tactics such as \texttt{Blast_tac} and {\tt
+Best_tac} refer to the default claset (\texttt{claset()}), which works for most
 purposes.  Named clasets include \ttindexbold{prop_cs}, which includes the
 propositional rules, and \ttindexbold{FOL_cs}, which also includes quantifier
-rules.  See the file {\tt FOL/cladata.ML} for lists of the
+rules.  See the file \texttt{FOL/cladata.ML} for lists of the
 classical rules, and 
 \iflabelundefined{chap:classical}{the {\em Reference Manual\/}}%
         {Chap.\ts\ref{chap:classical}} 
@@ -351,11 +351,15 @@
 \section{An intuitionistic example}
 Here is a session similar to one in {\em Logic and Computation}
 \cite[pages~222--3]{paulson87}.  Isabelle treats quantifiers differently
-from {\sc lcf}-based theorem provers such as {\sc hol}.  The proof begins
-by entering the goal in intuitionistic logic, then applying the rule
-$({\imp}I)$.
+from {\sc lcf}-based theorem provers such as {\sc hol}.  
+
+First, we specify that we are working in intuitionistic logic:
 \begin{ttbox}
-goal IFOL.thy "(EX y. ALL x. Q(x,y)) -->  (ALL x. EX y. Q(x,y))";
+context IFOL.thy;
+\end{ttbox}
+The proof begins by entering the goal, then applying the rule $({\imp}I)$.
+\begin{ttbox}
+Goal "(EX y. ALL x. Q(x,y)) -->  (ALL x. EX y. Q(x,y))";
 {\out Level 0}
 {\out (EX y. ALL x. Q(x,y)) --> (ALL x. EX y. Q(x,y))}
 {\out  1. (EX y. ALL x. Q(x,y)) --> (ALL x. EX y. Q(x,y))}
@@ -375,7 +379,7 @@
 {\out (EX y. ALL x. Q(x,y)) --> (ALL x. EX y. Q(x,y))}
 {\out  1. !!x. EX y. ALL x. Q(x,y) ==> EX y. Q(x,y)}
 \end{ttbox}
-Applying $({\forall}I)$ replaces the \hbox{\tt ALL x} by \hbox{\tt!!x},
+Applying $({\forall}I)$ replaces the \texttt{ALL~x} by \hbox{\tt!!x},
 changing the universal quantifier from object~($\forall$) to
 meta~($\Forall$).  The bound variable is a {\bf parameter} of the
 subgoal.  We now must choose between $({\exists}I)$ and $({\exists}E)$.  What
@@ -387,8 +391,8 @@
 {\out  1. !!x. EX y. ALL x. Q(x,y) ==> Q(x,?y2(x))}
 \end{ttbox}
 The new subgoal~1 contains the function variable {\tt?y2}.  Instantiating
-{\tt?y2} can replace~{\tt?y2(x)} by a term containing~{\tt x}, even
-though~{\tt x} is a bound variable.  Now we analyse the assumption
+{\tt?y2} can replace~{\tt?y2(x)} by a term containing~\texttt{x}, even
+though~\texttt{x} is a bound variable.  Now we analyse the assumption
 \(\exists y.\forall x. Q(x,y)\) using elimination rules:
 \begin{ttbox}
 by (eresolve_tac [exE] 1);
@@ -396,10 +400,10 @@
 {\out (EX y. ALL x. Q(x,y)) --> (ALL x. EX y. Q(x,y))}
 {\out  1. !!x y. ALL x. Q(x,y) ==> Q(x,?y2(x))}
 \end{ttbox}
-Applying $(\exists E)$ has produced the parameter {\tt y} and stripped the
+Applying $(\exists E)$ has produced the parameter \texttt{y} and stripped the
 existential quantifier from the assumption.  But the subgoal is unprovable:
-there is no way to unify {\tt ?y2(x)} with the bound variable~{\tt y}.
-Using {\tt choplev} we can return to the critical point.  This time we
+there is no way to unify \texttt{?y2(x)} with the bound variable~\texttt{y}.
+Using \texttt{choplev} we can return to the critical point.  This time we
 apply $({\exists}E)$:
 \begin{ttbox}
 choplev 2;
@@ -427,9 +431,9 @@
 {\out (EX y. ALL x. Q(x,y)) --> (ALL x. EX y. Q(x,y))}
 {\out  1. !!x y. Q(?x4(x,y),y) ==> Q(x,?y3(x,y))}
 \end{ttbox}
-The subgoal has variables {\tt ?y3} and {\tt ?x4} applied to both
+The subgoal has variables \texttt{?y3} and \texttt{?x4} applied to both
 parameters.  The obvious projection functions unify {\tt?x4(x,y)} with~{\tt
-x} and \verb|?y3(x,y)| with~{\tt y}.
+x} and \verb|?y3(x,y)| with~\texttt{y}.
 \begin{ttbox}
 by (assume_tac 1);
 {\out Level 6}
@@ -440,7 +444,7 @@
 ones.  But proof checking is tedious; \ttindex{IntPr.fast_tac} proves the
 theorem in one step.
 \begin{ttbox}
-goal IFOL.thy "(EX y. ALL x. Q(x,y)) -->  (ALL x. EX y. Q(x,y))";
+Goal "(EX y. ALL x. Q(x,y)) -->  (ALL x. EX y. Q(x,y))";
 {\out Level 0}
 {\out (EX y. ALL x. Q(x,y)) --> (ALL x. EX y. Q(x,y))}
 {\out  1. (EX y. ALL x. Q(x,y)) --> (ALL x. EX y. Q(x,y))}
@@ -469,7 +473,7 @@
 negations to implications using the definition $\neg P\equiv P\imp\bot$.
 This allows use of the special implication rules.
 \begin{ttbox}
-goalw IFOL.thy [not_def] "~ ~ ((P-->Q) | (Q-->P))";
+Goalw [not_def] "~ ~ ((P-->Q) | (Q-->P))";
 {\out Level 0}
 {\out ~ ~ ((P --> Q) | (Q --> P))}
 {\out  1. ((P --> Q) | (Q --> P) --> False) --> False}
@@ -525,20 +529,24 @@
 {\out ~ ~ ((P --> Q) | (Q --> P))}
 {\out No subgoals!}
 \end{ttbox}
-This proof is also trivial for {\tt IntPr.fast_tac}.
+This proof is also trivial for \texttt{IntPr.fast_tac}.
 
 
 \section{A classical example} \label{fol-cla-example}
 To illustrate classical logic, we shall prove the theorem
 $\ex{y}\all{x}P(y)\imp P(x)$.  Informally, the theorem can be proved as
 follows.  Choose~$y$ such that~$\neg P(y)$, if such exists; otherwise
-$\all{x}P(x)$ is true.  Either way the theorem holds.
+$\all{x}P(x)$ is true.  Either way the theorem holds.  First, we switch to
+classical logic:
+\begin{ttbox}
+context FOL.thy;
+\end{ttbox}
 
 The formal proof does not conform in any obvious way to the sketch given
 above.  The key inference is the first one, \tdx{exCI}; this classical
 version of~$(\exists I)$ allows multiple instantiation of the quantifier.
 \begin{ttbox}
-goal FOL.thy "EX y. ALL x. P(y)-->P(x)";
+Goal "EX y. ALL x. P(y)-->P(x)";
 {\out Level 0}
 {\out EX y. ALL x. P(y) --> P(x)}
 {\out  1. EX y. ALL x. P(y) --> P(x)}
@@ -582,7 +590,7 @@
 {\out EX y. ALL x. P(y) --> P(x)}
 {\out  1. !!x xa. [| P(?a); ~ P(x) |] ==> P(?y3(x)) --> P(xa)}
 \end{ttbox}
-The previous conclusion, {\tt P(x)}, has become a negated assumption.
+The previous conclusion, \texttt{P(x)}, has become a negated assumption.
 \begin{ttbox}
 by (resolve_tac [impI] 1);
 {\out Level 6}
@@ -603,15 +611,16 @@
 {\out EX y. ALL x. P(y) --> P(x)}
 {\out No subgoals!}
 \end{ttbox}
-The civilised way to prove this theorem is through \ttindex{deepen_tac},
+The civilised way to prove this theorem is through \ttindex{Blast_tac},
 which automatically uses the classical version of~$(\exists I)$:
 \begin{ttbox}
-goal FOL.thy "EX y. ALL x. P(y)-->P(x)";
+Goal "EX y. ALL x. P(y)-->P(x)";
 {\out Level 0}
 {\out EX y. ALL x. P(y) --> P(x)}
 {\out  1. EX y. ALL x. P(y) --> P(x)}
-by (Deepen_tac 0 1);
+by (Blast_tac 1);
 {\out Depth = 0}
+{\out Depth = 1}
 {\out Depth = 2}
 {\out Level 1}
 {\out EX y. ALL x. P(y) --> P(x)}
@@ -644,7 +653,7 @@
                                   & \infer*{S}{[\neg P,R]}} 
 \]
 Having made these plans, we get down to work with Isabelle.  The theory of
-classical logic, {\tt FOL}, is extended with the constant
+classical logic, \texttt{FOL}, is extended with the constant
 $if::[o,o,o]\To o$.  The axiom \tdx{if_def} asserts the
 equation~$(if)$.
 \begin{ttbox}
@@ -653,25 +662,33 @@
 rules   if_def "if(P,Q,R) == P&Q | ~P&R"
 end
 \end{ttbox}
-The derivations of the introduction and elimination rules demonstrate the
-methods for rewriting with definitions.  Classical reasoning is required,
-so we use {\tt blast_tac}.
+We create the file \texttt{If.thy} containing these declarations.  (This file
+is on directory \texttt{FOL/ex} in the Isabelle distribution.)  Typing
+\begin{ttbox}
+use_thy "If";  
+\end{ttbox}
+loads that theory and sets it to be the current context.
 
 
 \subsection{Deriving the introduction rule}
+
+The derivations of the introduction and elimination rules demonstrate the
+methods for rewriting with definitions.  Classical reasoning is required,
+so we use \texttt{blast_tac}.
+
 The introduction rule, given the premises $P\Imp Q$ and $\neg P\Imp R$,
 concludes $if(P,Q,R)$.  We propose the conclusion as the main goal
-using~\ttindex{goalw}, which uses {\tt if_def} to rewrite occurrences
+using~\ttindex{Goalw}, which uses \texttt{if_def} to rewrite occurrences
 of $if$ in the subgoal.
 \begin{ttbox}
-val prems = goalw If.thy [if_def]
+val prems = Goalw [if_def]
     "[| P ==> Q; ~ P ==> R |] ==> if(P,Q,R)";
 {\out Level 0}
 {\out if(P,Q,R)}
 {\out  1. P & Q | ~ P & R}
 \end{ttbox}
-The premises (bound to the {\ML} variable {\tt prems}) are passed as
-introduction rules to \ttindex{blast_tac}.  Remember that {\tt claset()} refers
+The premises (bound to the {\ML} variable \texttt{prems}) are passed as
+introduction rules to \ttindex{blast_tac}.  Remember that \texttt{claset()} refers
 to the default classical set.
 \begin{ttbox}
 by (blast_tac (claset() addIs prems) 1);
@@ -686,15 +703,15 @@
 The elimination rule has three premises, two of which are themselves rules.
 The conclusion is simply $S$.
 \begin{ttbox}
-val major::prems = goalw If.thy [if_def]
+val major::prems = Goalw [if_def]
    "[| if(P,Q,R);  [| P; Q |] ==> S; [| ~ P; R |] ==> S |] ==> S";
 {\out Level 0}
 {\out S}
 {\out  1. S}
 \end{ttbox}
 The major premise contains an occurrence of~$if$, but the version returned
-by \ttindex{goalw} (and bound to the {\ML} variable~{\tt major}) has the
-definition expanded.  Now \ttindex{cut_facts_tac} inserts~{\tt major} as an
+by \ttindex{Goalw} (and bound to the {\ML} variable~\texttt{major}) has the
+definition expanded.  Now \ttindex{cut_facts_tac} inserts~\texttt{major} as an
 assumption in the subgoal, so that \ttindex{blast_tac} can break it down.
 \begin{ttbox}
 by (cut_facts_tac [major] 1);
@@ -712,8 +729,8 @@
 \iflabelundefined{definitions}{{\em Introduction to Isabelle}}%
         {\S\ref{definitions}}, there are other
 ways of treating definitions when deriving a rule.  We can start the
-proof using {\tt goal}, which does not expand definitions, instead of
-{\tt goalw}.  We can use \ttindex{rew_tac}
+proof using \texttt{Goal}, which does not expand definitions, instead of
+\texttt{Goalw}.  We can use \ttindex{rew_tac}
 to expand definitions in the subgoals --- perhaps after calling
 \ttindex{cut_facts_tac} to insert the rule's premises.  We can use
 \ttindex{rewrite_rule}, which is a meta-inference rule, to expand
@@ -729,12 +746,11 @@
     if(if(P,Q,R), A, B)         & \bimp & if(P,if(Q,A,B),if(R,A,B))
 \end{eqnarray*}
 Proofs also require the classical reasoning rules and the $\bimp$
-introduction rule (called~\tdx{iffI}: do not confuse with~{\tt ifI}). 
+introduction rule (called~\tdx{iffI}: do not confuse with~\texttt{ifI}). 
 
 To display the $if$-rules in action, let us analyse a proof step by step.
 \begin{ttbox}
-goal If.thy
-    "if(P, if(Q,A,B), if(Q,C,D)) <-> if(Q, if(P,A,C), if(P,B,D))";
+Goal "if(P, if(Q,A,B), if(Q,C,D)) <-> if(Q, if(P,A,C), if(P,B,D))";
 {\out Level 0}
 {\out if(P,if(Q,A,B),if(Q,C,D)) <-> if(Q,if(P,A,C),if(P,B,D))}
 {\out  1. if(P,if(Q,A,B),if(Q,C,D)) <-> if(Q,if(P,A,C),if(P,B,D))}
@@ -808,7 +824,7 @@
 \end{ttbox}
 This tactic also solves the other example.
 \begin{ttbox}
-goal If.thy "if(if(P,Q,R), A, B) <-> if(P, if(Q,A,B), if(R,A,B))";
+Goal "if(if(P,Q,R), A, B) <-> if(P, if(Q,A,B), if(R,A,B))";
 {\out Level 0}
 {\out if(if(P,Q,R),A,B) <-> if(P,if(Q,A,B),if(R,A,B))}
 {\out  1. if(if(P,Q,R),A,B) <-> if(P,if(Q,A,B),if(R,A,B))}
@@ -839,8 +855,8 @@
 {\out     P & (Q & A | ~ Q & B) | ~ P & (R & A | ~ R & B)}
 \end{ttbox}
 We are left with a subgoal in pure first-order logic, which is why the 
-classical reasoner can prove it given {\tt FOL_cs} alone.  (We could, of 
-course, have used {\tt Blast_tac}.)
+classical reasoner can prove it given \texttt{FOL_cs} alone.  (We could, of 
+course, have used \texttt{Blast_tac}.)
 \begin{ttbox}
 by (blast_tac FOL_cs 1);
 {\out Level 2}
@@ -851,7 +867,7 @@
 approach has its merits --- especially if the prover for the base logic is
 good --- but can be slow.  In these examples, proofs using the default
 claset (which includes the derived rules) run about six times faster 
-than proofs using {\tt FOL_cs}.
+than proofs using \texttt{FOL_cs}.
 
 Expanding definitions also complicates error diagnosis.  Suppose we are having
 difficulties in proving some goal.  If by expanding definitions we have
@@ -860,7 +876,7 @@
 Attempts at program verification often yield invalid assertions.
 Let us try to prove one:
 \begin{ttbox}
-goal If.thy "if(if(P,Q,R), A, B) <-> if(P, if(Q,A,B), if(R,B,A))";
+Goal "if(if(P,Q,R), A, B) <-> if(P, if(Q,A,B), if(R,B,A))";
 {\out Level 0}
 {\out if(if(P,Q,R),A,B) <-> if(P,if(Q,A,B),if(R,B,A))}
 {\out  1. if(if(P,Q,R),A,B) <-> if(P,if(Q,A,B),if(R,B,A))}
--- a/doc-src/Logics/HOL.tex	Thu Jul 16 10:35:31 1998 +0200
+++ b/doc-src/Logics/HOL.tex	Thu Jul 16 11:50:01 1998 +0200
@@ -198,26 +198,26 @@
 
 \subsection{Binders}
 
-Hilbert's {\bf description} operator~$\varepsilon x.P[x]$ stands for
+Hilbert's {\bf description} operator~$\varepsilon x. P[x]$ stands for
 some~$x$ satisfying~$P$, if such exists.  Since all terms in \HOL\ 
 denote something, a description is always meaningful, but we do not
 know its value unless $P$ defines it uniquely.  We may write
-descriptions as \cdx{Eps}($\lambda x.P[x]$) or use the syntax
-\hbox{\tt \at $x$.$P[x]$}.
+descriptions as \cdx{Eps}($\lambda x. P[x]$) or use the syntax
+\hbox{\tt \at $x$. $P[x]$}.
 
 Existential quantification is defined by
-\[ \exists x.P~x \;\equiv\; P(\varepsilon x.P~x). \]
-The unique existence quantifier, $\exists!x.P$, is defined in terms
+\[ \exists x. P~x \;\equiv\; P(\varepsilon x. P~x). \]
+The unique existence quantifier, $\exists!x. P$, is defined in terms
 of~$\exists$ and~$\forall$.  An Isabelle binder, it admits nested
-quantifications.  For instance, $\exists!x\,y.P\,x\,y$ abbreviates
-$\exists!x. \exists!y.P\,x\,y$; note that this does not mean that there
+quantifications.  For instance, $\exists!x\,y. P\,x\,y$ abbreviates
+$\exists!x. \exists!y. P\,x\,y$; note that this does not mean that there
 exists a unique pair $(x,y)$ satisfying~$P\,x\,y$.
 
 \index{*"! symbol}\index{*"? symbol}\index{HOL system@{\sc hol} system}
 Quantifiers have two notations.  As in Gordon's {\sc hol} system, \HOL\
 uses~{\tt!}\ and~{\tt?}\ to stand for $\forall$ and $\exists$.  The
 existential quantifier must be followed by a space; thus {\tt?x} is an
-unknown, while \verb'? x.f x=y' is a quantification.  Isabelle's usual
+unknown, while \verb'? x. f x=y' is a quantification.  Isabelle's usual
 notation for quantifiers, \sdx{ALL} and \sdx{EX}, is also
 available.  Both notations are accepted for input.  The {\ML} reference
 \ttindexbold{HOL_quantifiers} governs the output notation.  If set to {\tt
@@ -225,7 +225,7 @@
 to \texttt{false}, then~{\tt ALL} and~{\tt EX} are displayed.
 
 If $\tau$ is a type of class \cldx{ord}, $P$ a formula and $x$ a
-variable of type $\tau$, then the term \cdx{LEAST}~$x.P[x]$ is defined
+variable of type $\tau$, then the term \cdx{LEAST}~$x. P[x]$ is defined
 to be the least (w.r.t.\ $\le$) $x$ such that $P~x$ holds (see
 Fig.~\ref{hol-defs}).  The definition uses Hilbert's $\varepsilon$
 choice operator, so \texttt{Least} is always meaningful, but may yield
@@ -238,7 +238,7 @@
 \begin{warn}
 The low priority of binders means that they need to be enclosed in
 parenthesis when they occur in the context of other operations.  For example,
-instead of $P \land \forall x.Q$ you need to write $P \land (\forall x.Q)$.
+instead of $P \land \forall x. Q$ you need to write $P \land (\forall x. Q)$.
 \end{warn}
 
 
@@ -271,11 +271,11 @@
 \begin{ttbox}\makeatother
 \tdx{refl}           t = (t::'a)
 \tdx{subst}          [| s = t; P s |] ==> P (t::'a)
-\tdx{ext}            (!!x::'a. (f x :: 'b) = g x) ==> (\%x.f x) = (\%x.g x)
+\tdx{ext}            (!!x::'a. (f x :: 'b) = g x) ==> (\%x. f x) = (\%x. g x)
 \tdx{impI}           (P ==> Q) ==> P-->Q
 \tdx{mp}             [| P-->Q;  P |] ==> Q
 \tdx{iff}            (P-->Q) --> (Q-->P) --> (P=Q)
-\tdx{selectI}        P(x::'a) ==> P(@x.P x)
+\tdx{selectI}        P(x::'a) ==> P(@x. P x)
 \tdx{True_or_False}  (P=True) | (P=False)
 \end{ttbox}
 \caption{The \texttt{HOL} rules} \label{hol-rules}
@@ -299,10 +299,10 @@
 
 \begin{figure}\hfuzz=4pt%suppress "Overfull \hbox" message
 \begin{ttbox}\makeatother
-\tdx{True_def}   True     == ((\%x::bool.x)=(\%x.x))
-\tdx{All_def}    All      == (\%P. P = (\%x.True))
-\tdx{Ex_def}     Ex       == (\%P. P(@x.P x))
-\tdx{False_def}  False    == (!P.P)
+\tdx{True_def}   True     == ((\%x::bool. x)=(\%x. x))
+\tdx{All_def}    All      == (\%P. P = (\%x. True))
+\tdx{Ex_def}     Ex       == (\%P. P(@x. P x))
+\tdx{False_def}  False    == (!P. P)
 \tdx{not_def}    not      == (\%P. P-->False)
 \tdx{and_def}    op &     == (\%P Q. !R. (P-->Q-->R) --> R)
 \tdx{or_def}     op |     == (\%P Q. !R. (P-->R) --> (Q-->R) --> R)
@@ -384,18 +384,18 @@
 \begin{figure}
 \begin{ttbox}\makeatother
 \tdx{allI}      (!!x. P x) ==> !x. P x
-\tdx{spec}      !x.P x ==> P x
-\tdx{allE}      [| !x.P x;  P x ==> R |] ==> R
-\tdx{all_dupE}  [| !x.P x;  [| P x; !x.P x |] ==> R |] ==> R
+\tdx{spec}      !x. P x ==> P x
+\tdx{allE}      [| !x. P x;  P x ==> R |] ==> R
+\tdx{all_dupE}  [| !x. P x;  [| P x; !x. P x |] ==> R |] ==> R
 
 \tdx{exI}       P x ==> ? x. P x
 \tdx{exE}       [| ? x. P x; !!x. P x ==> Q |] ==> Q
 
 \tdx{ex1I}      [| P a;  !!x. P x ==> x=a |] ==> ?! x. P x
-\tdx{ex1E}      [| ?! x.P x;  !!x. [| P x;  ! y. P y --> y=x |] ==> R 
+\tdx{ex1E}      [| ?! x. P x;  !!x. [| P x;  ! y. P y --> y=x |] ==> R 
           |] ==> R
 
-\tdx{select_equality} [| P a;  !!x. P x ==> x=a |] ==> (@x.P x) = a
+\tdx{select_equality} [| P a;  !!x. P x ==> x=a |] ==> (@x. P x) = a
 \subcaption{Quantifiers and descriptions}
 
 \tdx{ccontr}          (~P ==> False) ==> P
@@ -403,7 +403,7 @@
 \tdx{excluded_middle} ~P | P
 
 \tdx{disjCI}          (~Q ==> P) ==> P|Q
-\tdx{exCI}            (! x. ~ P x ==> P a) ==> ? x.P x
+\tdx{exCI}            (! x. ~ P x ==> P a) ==> ? x. P x
 \tdx{impCE}           [| P-->Q; ~ P ==> R; Q ==> R |] ==> R
 \tdx{iffCE}           [| P=Q;  [| P;Q |] ==> R;  [| ~P; ~Q |] ==> R |] ==> R
 \tdx{notnotD}         ~~P ==> P
@@ -511,17 +511,17 @@
   \it external          & \it internal  & \it description \\ 
   $a$ \ttilde: $b$      & \ttilde($a$ : $b$)    & \rm non-membership\\
   {\ttlbrace}$a@1$, $\ldots${\ttrbrace}  &  insert $a@1$ $\ldots$ {\ttlbrace}{\ttrbrace} & \rm finite set \\
-  {\ttlbrace}$x$.$P[x]${\ttrbrace}        &  Collect($\lambda x.P[x]$) &
+  {\ttlbrace}$x$. $P[x]${\ttrbrace}        &  Collect($\lambda x. P[x]$) &
         \rm comprehension \\
-  \sdx{INT} $x$:$A$.$B[x]$      & INTER $A$ $\lambda x.B[x]$ &
+  \sdx{INT} $x$:$A$. $B[x]$      & INTER $A$ $\lambda x. B[x]$ &
         \rm intersection \\
-  \sdx{UN}{\tt\ }  $x$:$A$.$B[x]$      & UNION $A$ $\lambda x.B[x]$ &
+  \sdx{UN}{\tt\ }  $x$:$A$. $B[x]$      & UNION $A$ $\lambda x. B[x]$ &
         \rm union \\
-  \tt ! $x$:$A$.$P[x]$ or \sdx{ALL} $x$:$A$.$P[x]$ & 
-        Ball $A$ $\lambda x.P[x]$ & 
+  \tt ! $x$:$A$. $P[x]$ or \sdx{ALL} $x$:$A$. $P[x]$ & 
+        Ball $A$ $\lambda x. P[x]$ & 
         \rm bounded $\forall$ \\
-  \sdx{?} $x$:$A$.$P[x]$ or \sdx{EX}{\tt\ } $x$:$A$.$P[x]$ & 
-        Bex $A$ $\lambda x.P[x]$ & \rm bounded $\exists$
+  \sdx{?} $x$:$A$. $P[x]$ or \sdx{EX}{\tt\ } $x$:$A$. $P[x]$ & 
+        Bex $A$ $\lambda x. P[x]$ & \rm bounded $\exists$
 \end{tabular}
 \end{center}
 \subcaption{Translations}
@@ -596,35 +596,35 @@
   \texttt{insert} \, a \, ({\tt insert} \, b \, ({\tt insert} \, c \, \{\}))
 \end{eqnarray*}
 
-The set \hbox{\tt{\ttlbrace}$x$.$P[x]${\ttrbrace}} consists of all $x$ (of suitable type)
+The set \hbox{\tt{\ttlbrace}$x$. $P[x]${\ttrbrace}} consists of all $x$ (of suitable type)
 that satisfy~$P[x]$, where $P[x]$ is a formula that may contain free
 occurrences of~$x$.  This syntax expands to \cdx{Collect}$(\lambda
-x.P[x])$.  It defines sets by absolute comprehension, which is impossible
+x. P[x])$.  It defines sets by absolute comprehension, which is impossible
 in~{\ZF}; the type of~$x$ implicitly restricts the comprehension.
 
 The set theory defines two {\bf bounded quantifiers}:
 \begin{eqnarray*}
-   \forall x\in A.P[x] &\hbox{abbreviates}& \forall x. x\in A\imp P[x] \\
-   \exists x\in A.P[x] &\hbox{abbreviates}& \exists x. x\in A\conj P[x]
+   \forall x\in A. P[x] &\hbox{abbreviates}& \forall x. x\in A\imp P[x] \\
+   \exists x\in A. P[x] &\hbox{abbreviates}& \exists x. x\in A\conj P[x]
 \end{eqnarray*}
 The constants~\cdx{Ball} and~\cdx{Bex} are defined
 accordingly.  Instead of \texttt{Ball $A$ $P$} and \texttt{Bex $A$ $P$} we may
 write\index{*"! symbol}\index{*"? symbol}
 \index{*ALL symbol}\index{*EX symbol} 
 %
-\hbox{\tt !~$x$:$A$.$P[x]$} and \hbox{\tt ?~$x$:$A$.$P[x]$}.  Isabelle's
+\hbox{\tt !~$x$:$A$. $P[x]$} and \hbox{\tt ?~$x$:$A$. $P[x]$}.  Isabelle's
 usual quantifier symbols, \sdx{ALL} and \sdx{EX}, are also accepted
 for input.  As with the primitive quantifiers, the {\ML} reference
 \ttindex{HOL_quantifiers} specifies which notation to use for output.
 
 Unions and intersections over sets, namely $\bigcup@{x\in A}B[x]$ and
 $\bigcap@{x\in A}B[x]$, are written 
-\sdx{UN}~\hbox{\tt$x$:$A$.$B[x]$} and
-\sdx{INT}~\hbox{\tt$x$:$A$.$B[x]$}.  
+\sdx{UN}~\hbox{\tt$x$:$A$. $B[x]$} and
+\sdx{INT}~\hbox{\tt$x$:$A$. $B[x]$}.  
 
 Unions and intersections over types, namely $\bigcup@x B[x]$ and $\bigcap@x
-B[x]$, are written \sdx{UN}~\hbox{\tt$x$.$B[x]$} and
-\sdx{INT}~\hbox{\tt$x$.$B[x]$}.  They are equivalent to the previous
+B[x]$, are written \sdx{UN}~\hbox{\tt$x$. $B[x]$} and
+\sdx{INT}~\hbox{\tt$x$. $B[x]$}.  They are equivalent to the previous
 union and intersection operators when $A$ is the universal set.
 
 The operators $\bigcup A$ and $\bigcap A$ act upon sets of sets.  They are
@@ -635,22 +635,22 @@
 
 \begin{figure} \underscoreon
 \begin{ttbox}
-\tdx{mem_Collect_eq}    (a : {\ttlbrace}x.P x{\ttrbrace}) = P a
-\tdx{Collect_mem_eq}    {\ttlbrace}x.x:A{\ttrbrace} = A
+\tdx{mem_Collect_eq}    (a : {\ttlbrace}x. P x{\ttrbrace}) = P a
+\tdx{Collect_mem_eq}    {\ttlbrace}x. x:A{\ttrbrace} = A
 
-\tdx{empty_def}         {\ttlbrace}{\ttrbrace}          == {\ttlbrace}x.False{\ttrbrace}
-\tdx{insert_def}        insert a B  == {\ttlbrace}x.x=a{\ttrbrace} Un B
+\tdx{empty_def}         {\ttlbrace}{\ttrbrace}          == {\ttlbrace}x. False{\ttrbrace}
+\tdx{insert_def}        insert a B  == {\ttlbrace}x. x=a{\ttrbrace} Un B
 \tdx{Ball_def}          Ball A P    == ! x. x:A --> P x
 \tdx{Bex_def}           Bex A P     == ? x. x:A & P x
 \tdx{subset_def}        A <= B      == ! x:A. x:B
-\tdx{Un_def}            A Un B      == {\ttlbrace}x.x:A | x:B{\ttrbrace}
-\tdx{Int_def}           A Int B     == {\ttlbrace}x.x:A & x:B{\ttrbrace}
-\tdx{set_diff_def}      A - B       == {\ttlbrace}x.x:A & x~:B{\ttrbrace}
+\tdx{Un_def}            A Un B      == {\ttlbrace}x. x:A | x:B{\ttrbrace}
+\tdx{Int_def}           A Int B     == {\ttlbrace}x. x:A & x:B{\ttrbrace}
+\tdx{set_diff_def}      A - B       == {\ttlbrace}x. x:A & x~:B{\ttrbrace}
 \tdx{Compl_def}         Compl A     == {\ttlbrace}x. ~ x:A{\ttrbrace}
 \tdx{INTER_def}         INTER A B   == {\ttlbrace}y. ! x:A. y: B x{\ttrbrace}
 \tdx{UNION_def}         UNION A B   == {\ttlbrace}y. ? x:A. y: B x{\ttrbrace}
-\tdx{INTER1_def}        INTER1 B    == INTER {\ttlbrace}x.True{\ttrbrace} B 
-\tdx{UNION1_def}        UNION1 B    == UNION {\ttlbrace}x.True{\ttrbrace} B 
+\tdx{INTER1_def}        INTER1 B    == INTER {\ttlbrace}x. True{\ttrbrace} B 
+\tdx{UNION1_def}        UNION1 B    == UNION {\ttlbrace}x. True{\ttrbrace} B 
 \tdx{Inter_def}         Inter S     == (INT x:S. x)
 \tdx{Union_def}         Union S     == (UN  x:S. x)
 \tdx{Pow_def}           Pow A       == {\ttlbrace}B. B <= A{\ttrbrace}
@@ -663,20 +663,20 @@
 
 \begin{figure} \underscoreon
 \begin{ttbox}
-\tdx{CollectI}        [| P a |] ==> a : {\ttlbrace}x.P x{\ttrbrace}
-\tdx{CollectD}        [| a : {\ttlbrace}x.P x{\ttrbrace} |] ==> P a
-\tdx{CollectE}        [| a : {\ttlbrace}x.P x{\ttrbrace};  P a ==> W |] ==> W
+\tdx{CollectI}        [| P a |] ==> a : {\ttlbrace}x. P x{\ttrbrace}
+\tdx{CollectD}        [| a : {\ttlbrace}x. P x{\ttrbrace} |] ==> P a
+\tdx{CollectE}        [| a : {\ttlbrace}x. P x{\ttrbrace};  P a ==> W |] ==> W
 
 \tdx{ballI}           [| !!x. x:A ==> P x |] ==> ! x:A. P x
 \tdx{bspec}           [| ! x:A. P x;  x:A |] ==> P x
 \tdx{ballE}           [| ! x:A. P x;  P x ==> Q;  ~ x:A ==> Q |] ==> Q
 
 \tdx{bexI}            [| P x;  x:A |] ==> ? x:A. P x
-\tdx{bexCI}           [| ! x:A. ~ P x ==> P a;  a:A |] ==> ? x:A.P x
+\tdx{bexCI}           [| ! x:A. ~ P x ==> P a;  a:A |] ==> ? x:A. P x
 \tdx{bexE}            [| ? x:A. P x;  !!x. [| x:A; P x |] ==> Q  |] ==> Q
 \subcaption{Comprehension and Bounded quantifiers}
 
-\tdx{subsetI}         (!!x.x:A ==> x:B) ==> A <= B
+\tdx{subsetI}         (!!x. x:A ==> x:B) ==> A <= B
 \tdx{subsetD}         [| A <= B;  c:A |] ==> c:B
 \tdx{subsetCE}        [| A <= B;  ~ (c:A) ==> P;  c:B ==> P |] ==> P
 
@@ -822,8 +822,8 @@
 \tdx{Un_assoc}          (A Un B)  Un C  =  A Un (B Un C)
 \tdx{Un_Int_distrib}    (A Int B) Un C  =  (A Un C) Int (B Un C)
 
-\tdx{Compl_disjoint}    A Int (Compl A) = {\ttlbrace}x.False{\ttrbrace}
-\tdx{Compl_partition}   A Un  (Compl A) = {\ttlbrace}x.True{\ttrbrace}
+\tdx{Compl_disjoint}    A Int (Compl A) = {\ttlbrace}x. False{\ttrbrace}
+\tdx{Compl_partition}   A Un  (Compl A) = {\ttlbrace}x. True{\ttrbrace}
 \tdx{double_complement} Compl(Compl A) = A
 \tdx{Compl_Un}          Compl(A Un B)  = (Compl A) Int (Compl B)
 \tdx{Compl_Int}         Compl(A Int B) = (Compl A) Un (Compl B)
@@ -1078,12 +1078,12 @@
 In addition, it is possible to use tuples
 as patterns in abstractions:
 \begin{center}
-{\tt\%($x$,$y$).$t$} \quad stands for\quad \texttt{split(\%$x$\thinspace$y$.$t$)} 
+{\tt\%($x$,$y$). $t$} \quad stands for\quad \texttt{split(\%$x$\thinspace$y$. $t$)} 
 \end{center}
 Nested patterns are also supported.  They are translated stepwise:
-{\tt\%($x$,$y$,$z$).$t$} $\leadsto$ {\tt\%($x$,($y$,$z$)).$t$} $\leadsto$
-{\tt split(\%$x$.\%($y$,$z$).$t$)} $\leadsto$ \texttt{split(\%$x$.split(\%$y$
-  $z$.$t$))}.  The reverse translation is performed upon printing.
+{\tt\%($x$,$y$,$z$). $t$} $\leadsto$ {\tt\%($x$,($y$,$z$)). $t$} $\leadsto$
+{\tt split(\%$x$.\%($y$,$z$). $t$)} $\leadsto$ \texttt{split(\%$x$. split(\%$y$
+  $z$. $t$))}.  The reverse translation is performed upon printing.
 \begin{warn}
   The translation between patterns and \texttt{split} is performed automatically
   by the parser and printer.  Thus the internal and external form of a term
@@ -1762,9 +1762,9 @@
 We want to define the type $\alpha~mylist$.\footnote{This is just an
   example, there is already a list type in \HOL, of course.} To do
 this we have to build a new theory that contains the type definition.
-We start from the basic \texttt{HOL} theory.
+We start from the theory of arithmetic.
 \begin{ttbox}
-MyList = HOL +
+MyList = Arith +
   datatype 'a mylist = Nil | Cons 'a ('a mylist)
 end
 \end{ttbox}
@@ -1773,7 +1773,7 @@
 goal with $x$ quantified at the object-level.  This will be stripped
 later using \ttindex{qed_spec_mp}.
 \begin{ttbox}
-goal MyList.thy "!x. Cons x xs ~= xs";
+Goal "!x. Cons x xs ~= xs";
 {\out Level 0}
 {\out ! x. Cons x xs ~= xs}
 {\out  1. ! x. Cons x xs ~= xs}
@@ -1823,7 +1823,7 @@
 notation \verb|#| for \texttt{Cons}.  To do this we simply add mixfix
 annotations after the constructor declarations as follows:
 \begin{ttbox}
-MyList = HOL +
+MyList = Arith +
   datatype 'a mylist =
     Nil ("[]")  |
     Cons 'a ('a mylist)  (infixr "#" 70)
@@ -1837,7 +1837,7 @@
 
 This example shows a datatype that consists of 7 constructors:
 \begin{ttbox}
-Days = Arith +
+Days = Main +
   datatype days = Mon | Tue | Wed | Thu | Fri | Sat | Sun
 end
 \end{ttbox}
@@ -1847,7 +1847,7 @@
 contained among the distinctness theorems, but the simplifier can
 prove it thanks to rewrite rules inherited from theory \texttt{Arith}:
 \begin{ttbox}
-goal Days.thy "Mon ~= Tue";
+Goal "Mon ~= Tue";
 by (Simp_tac 1);
 \end{ttbox}
 You need not derive such inequalities explicitly: the simplifier will dispose
@@ -1939,7 +1939,7 @@
 \item \textit{function} is the name of the function, either as an \textit{id}
   or a \textit{string}.
 \item \textit{type} is the name of the datatype, either as an \textit{id} or
-  in the long form \texttt{$T$.$t$} ($T$ is the name of the theory
+  in the long form \texttt{$T$.$t$} ($T$ is the name of the theory
   where the datatype has been declared, $t$ the name of the datatype).
   The long form is required if the \texttt{datatype} and the {\tt
     primrec} sections are in different theories.
@@ -1970,7 +1970,7 @@
 The reduction rules for {\tt\at} become part of the default simpset, which
 leads to short proofs:
 \begin{ttbox}\underscoreon
-goal Append.thy "(xs @ ys) @ zs = xs @ (ys @ zs)";
+Goal "(xs @ ys) @ zs = xs @ (ys @ zs)";
 by (induct\_tac "xs" 1);
 by (ALLGOALS Asm\_simp\_tac);
 \end{ttbox}
@@ -2051,10 +2051,10 @@
 \end{itemize}
 
 We can use \texttt{measure} to declare Euclid's algorithm for the greatest
-common divisor.  The measure function, $\lambda(m,n).n$, specifies that the
+common divisor.  The measure function, $\lambda(m,n). n$, specifies that the
 recursion terminates because argument~$n$ decreases.
 \begin{ttbox}
-recdef gcd "measure ((\%(m,n).n) ::nat*nat=>nat)"
+recdef gcd "measure ((\%(m,n). n) ::nat*nat=>nat)"
     "gcd (m, n) = (if n=0 then m else gcd(n, m mod n))"
 \end{ttbox}
 
@@ -2126,7 +2126,7 @@
 Isabelle can prove the \texttt{gcd} function's termination condition
 automatically if supplied with the right simpset.
 \begin{ttbox}
-recdef gcd "measure ((\%(m,n).n) ::nat*nat=>nat)"
+recdef gcd "measure ((\%(m,n). n) ::nat*nat=>nat)"
   simpset "simpset() addsimps [mod_less_divisor, zero_less_eq]"
     "gcd (m, n) = (if n=0 then m else gcd(n, m mod n))"
 \end{ttbox}
@@ -2447,10 +2447,14 @@
 its powerset, some subset is outside its range.  
 
 The Isabelle proof uses \HOL's set theory, with the type $\alpha\,set$ and
-the operator \cdx{range}.  The set~$S$ is given as an unknown instead of a
+the operator \cdx{range}.
+\begin{ttbox}
+context Set.thy;
+\end{ttbox}
+The set~$S$ is given as an unknown instead of a
 quantified variable so that we may inspect the subset found by the proof.
 \begin{ttbox}
-goal Set.thy "?S ~: range\thinspace(f :: 'a=>'a set)";
+Goal "?S ~: range\thinspace(f :: 'a=>'a set)";
 {\out Level 0}
 {\out ?S ~: range f}
 {\out  1. ?S ~: range f}
--- a/doc-src/Logics/LK-eg.txt	Thu Jul 16 10:35:31 1998 +0200
+++ b/doc-src/Logics/LK-eg.txt	Thu Jul 16 11:50:01 1998 +0200
@@ -3,7 +3,9 @@
 Pretty.setmargin 72;  (*existing macros just allow this margin*)
 print_depth 0;
 
-goal LK_Rule.thy "|- EX y. ALL x. P(y)-->P(x)";
+context LK.thy;
+
+Goal "|- EX y. ALL x. P(y)-->P(x)";
 by (resolve_tac [exR] 1);
 by (resolve_tac [allR] 1);
 by (resolve_tac [impR] 1);
@@ -13,12 +15,12 @@
 by (resolve_tac [allR] 1);
 by (resolve_tac [impR] 1);
 by (resolve_tac [basic] 1);
-goal LK_Rule.thy "|- EX y. ALL x. P(y)-->P(x)";
+Goal "|- EX y. ALL x. P(y)-->P(x)";
 by (best_tac LK_dup_pack 1);
 
 
 
-goal LK_Rule.thy "|- ~ (EX x. ALL y. F(y,x) <-> ~F(y,y))";
+Goal "|- ~ (EX x. ALL y. F(y,x) <-> ~F(y,y))";
 by (resolve_tac [notR] 1);
 by (resolve_tac [exL] 1);
 by (resolve_tac [allL_thin] 1);
@@ -29,8 +31,10 @@
 by (resolve_tac [basic] 1);
 
 
+STOP_HERE;
 
-> goal LK_Rule.thy "|- EX y. ALL x. P(y)-->P(x)";
+
+> Goal "|- EX y. ALL x. P(y)-->P(x)";
 Level 0
  |- EX y. ALL x. P(y) --> P(x)
  1.  |- EX y. ALL x. P(y) --> P(x)
@@ -65,7 +69,7 @@
  |- EX y. ALL x. P(y) --> P(x)
 No subgoals!
 
-> goal LK_Rule.thy "|- EX y. ALL x. P(y)-->P(x)";
+> Goal "|- EX y. ALL x. P(y)-->P(x)";
 Level 0
  |- EX y. ALL x. P(y) --> P(x)
  1.  |- EX y. ALL x. P(y) --> P(x)
@@ -77,7 +81,7 @@
 
 
 
-> goal LK_Rule.thy "|- ~ (EX x. ALL y. F(y,x) <-> ~F(y,y))";
+> Goal "|- ~ (EX x. ALL y. F(y,x) <-> ~F(y,y))";
 Level 0
  |- ~(EX x. ALL y. F(y,x) <-> ~F(y,y))
  1.  |- ~(EX x. ALL y. F(y,x) <-> ~F(y,y))
--- a/doc-src/Logics/LK.tex	Thu Jul 16 10:35:31 1998 +0200
+++ b/doc-src/Logics/LK.tex	Thu Jul 16 11:50:01 1998 +0200
@@ -23,6 +23,15 @@
 logic theorem prover that is as powerful as the generic classical reasoner,
 except that it does not perform equality reasoning.
 
+To work in LK, start up Isabelle specifying \texttt{Sequents} as the
+object-logic.  Once in Isabelle, change the context to theory \texttt{LK.thy}:
+\begin{ttbox}
+isabelle Sequents
+context LK.thy;
+\end{ttbox}
+Modal logic and linear logic are also available, but unfortunately they are
+not documented.
+
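As a quick sanity check (a hypothetical session, not taken from the examples
files), the sequent prover should dispose of a simple tautology at once:
\begin{ttbox}
Goal "|- P | ~ P";
by (best_tac LK_dup_pack 1);
\end{ttbox}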
 
 \begin{figure} 
 \begin{center}
@@ -111,15 +120,15 @@
 Higher-order unification includes associative unification as a special
 case, by an encoding that involves function composition
 \cite[page~37]{huet78}.  To represent lists, let $C$ be a new constant.
-The empty list is $\lambda x.x$, while $[t@1,t@2,\ldots,t@n]$ is
+The empty list is $\lambda x. x$, while $[t@1,t@2,\ldots,t@n]$ is
 represented by
-\[ \lambda x.C(t@1,C(t@2,\ldots,C(t@n,x))).  \]
+\[ \lambda x. C(t@1,C(t@2,\ldots,C(t@n,x))).  \]
 The unifiers of this with $\lambda x.\Var{f}(\Var{g}(x))$ give all the ways
 of expressing $[t@1,t@2,\ldots,t@n]$ as the concatenation of two lists.
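To see this in a small case, take $n=2$: the unifiers of
$\lambda x. C(t@1,C(t@2,x))$ with $\lambda x.\Var{f}(\Var{g}(x))$ include
\begin{eqnarray*}
  \Var{f}=\lambda x. x &\qquad& \Var{g}=\lambda x. C(t@1,C(t@2,x)) \\
  \Var{f}=\lambda x. C(t@1,x) &\qquad& \Var{g}=\lambda x. C(t@2,x) \\
  \Var{f}=\lambda x. C(t@1,C(t@2,x)) &\qquad& \Var{g}=\lambda x. x
\end{eqnarray*}
corresponding to the three ways of splitting $[t@1,t@2]$ into two lists.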
 
 Unlike orthodox associative unification, this technique can represent certain
 infinite sets of unifiers by flex-flex equations.   But note that the term
-$\lambda x.C(t,\Var{a})$ does not represent any list.  Flex-flex constraints
+$\lambda x. C(t,\Var{a})$ does not represent any list.  Flex-flex constraints
 containing such garbage terms may accumulate during a proof.
 \index{flex-flex constraints}
 
@@ -172,14 +181,14 @@
 
 \tdx{FalseL}  $H, False, $G |- $E
 
-\tdx{allR}    (!!x.$H|- $E, P(x), $F) ==> $H|- $E, ALL x.P(x), $F
-\tdx{allL}    $H, P(x), $G, ALL x.P(x) |- $E ==> $H, ALL x.P(x), $G|- $E
+\tdx{allR}    (!!x. $H|- $E, P(x), $F) ==> $H|- $E, ALL x. P(x), $F
+\tdx{allL}    $H, P(x), $G, ALL x. P(x) |- $E ==> $H, ALL x. P(x), $G|- $E
             
-\tdx{exR}     $H|- $E, P(x), $F, EX x.P(x) ==> $H|- $E, EX x.P(x), $F
-\tdx{exL}     (!!x.$H, P(x), $G|- $E) ==> $H, EX x.P(x), $G|- $E
+\tdx{exR}     $H|- $E, P(x), $F, EX x. P(x) ==> $H|- $E, EX x. P(x), $F
+\tdx{exL}     (!!x. $H, P(x), $G|- $E) ==> $H, EX x. P(x), $G|- $E
 
-\tdx{The}     [| $H |- $E, P(a), $F;  !!x.$H, P(x) |- $E, x=a, $F |] ==>
-        $H |- $E, P(THE x.P(x)), $F
+\tdx{The}     [| $H |- $E, P(a), $F;  !!x. $H, P(x) |- $E, x=a, $F |] ==>
+        $H |- $E, P(THE x. P(x)), $F
 \subcaption{Logical rules}
 \end{ttbox}
 
@@ -194,11 +203,11 @@
 by the representation of sequents.  Type $sobj\To sobj$ represents a list
 of formulae.
 
-The {\bf definite description} operator~$\iota x.P[x]$ stands for some~$a$
+The {\bf definite description} operator~$\iota x. P[x]$ stands for some~$a$
 satisfying~$P[a]$, if one exists and is unique.  Since all terms in \LK{}
 denote something, a description is always meaningful, but we do not know
 its value unless $P[x]$ defines it uniquely.  The Isabelle notation is
-\hbox{\tt THE $x$.$P[x]$}.  The corresponding rule (Fig.\ts\ref{lk-rules})
+\hbox{\tt THE $x$. $P[x]$}.  The corresponding rule (Fig.\ts\ref{lk-rules})
 does not entail the Axiom of Choice because it requires uniqueness.
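To get a feel for the rule, instantiate $P[x]$ to $x=a$ (a worked instance of
our own):
\begin{ttbox}
[| $H |- $E, a=a, $F;  !!x. $H, x=a |- $E, x=a, $F |] ==>
        $H |- $E, (THE x. x=a) = a, $F
\end{ttbox}
The second premise is an instance of \texttt{basic}, and the first needs only
the reflexivity of equality, so \hbox{\tt THE $x$. $x$=$a$} is provably equal
to~$a$.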
 
 Figure~\ref{lk-grammar} presents the grammar of \LK.  Traditionally,
@@ -241,8 +250,8 @@
 \tdx{iffL}        [| $H, $G |- $E, P, Q;  $H, Q, P, $G |- $E |] ==>
             $H, P<->Q, $G |- $E
 
-\tdx{allL_thin}   $H, P(x), $G |- $E ==> $H, ALL x.P(x), $G |- $E
-\tdx{exR_thin}    $H |- $E, P(x), $F ==> $H |- $E, EX x.P(x), $F
+\tdx{allL_thin}   $H, P(x), $G |- $E ==> $H, ALL x. P(x), $G |- $E
+\tdx{exR_thin}    $H |- $E, P(x), $F ==> $H |- $E, EX x. P(x), $F
 \end{ttbox}
 
 \caption{Derived rules for {\tt LK}} \label{lk-derived}
@@ -470,7 +479,7 @@
 proofs are essentially the same; the key step here is to use \tdx{exR}
 rather than the weaker~\tdx{exR_thin}.
 \begin{ttbox}
-goal LK.thy "|- EX y. ALL x. P(y)-->P(x)";
+Goal "|- EX y. ALL x. P(y)-->P(x)";
 {\out Level 0}
 {\out  |- EX y. ALL x. P(y) --> P(x)}
 {\out  1.  |- EX y. ALL x. P(y) --> P(x)}
@@ -517,7 +526,7 @@
 {\out  1. !!x xa. P(?x), P(?x7(x)) |- P(x), P(xa)}
 \end{ttbox}
 Subgoal~1 seems to offer lots of possibilities.  Actually the only useful
-step is instantiating~$\Var{x@7}$ to $\lambda x.x$,
+step is instantiating~$\Var{x@7}$ to $\lambda x. x$,
 transforming~$\Var{x@7}(x)$ into~$x$.
 \begin{ttbox}
 by (resolve_tac [basic] 1);
@@ -528,7 +537,7 @@
 This theorem can be proved automatically.  Because it involves quantifier
 duplication, we employ best-first search:
 \begin{ttbox}
-goal LK.thy "|- EX y. ALL x. P(y)-->P(x)";
+Goal "|- EX y. ALL x. P(y)-->P(x)";
 {\out Level 0}
 {\out  |- EX y. ALL x. P(y) --> P(x)}
 {\out  1.  |- EX y. ALL x. P(y) --> P(x)}
@@ -553,7 +562,7 @@
 
 We set the main goal and move the negated formula to the left.
 \begin{ttbox}
-goal LK.thy "|- ~ (EX x. ALL y. F(y,x) <-> ~F(y,y))";
+Goal "|- ~ (EX x. ALL y. F(y,x) <-> ~F(y,y))";
 {\out Level 0}
 {\out  |- ~ (EX x. ALL y. F(y,x) <-> ~ F(y,y))}
 {\out  1.  |- ~ (EX x. ALL y. F(y,x) <-> ~ F(y,y))}
--- a/doc-src/Logics/ZF-eg.txt	Thu Jul 16 10:35:31 1998 +0200
+++ b/doc-src/Logics/ZF-eg.txt	Thu Jul 16 11:50:01 1998 +0200
@@ -27,17 +27,17 @@
 choplev 0;
 by (fast_tac (ZF_cs addIs [equalityI]) 1);
 
-val [prem] = goal ZF.thy "C<=D ==> Union(C) <= Union(D)";
+Goal "C<=D ==> Union(C) <= Union(D)";
 by (resolve_tac [subsetI] 1);
 by (eresolve_tac [UnionE] 1);
 by (resolve_tac [UnionI] 1);
-by (resolve_tac [prem RS subsetD] 1);
+by (eresolve_tac [subsetD] 1);
 by (assume_tac 1);
 by (assume_tac 1);
 choplev 0;
 by (resolve_tac [Union_least] 1);
 by (resolve_tac [Union_upper] 1);
-by (eresolve_tac [prem RS subsetD] 1);
+by (eresolve_tac [subsetD] 1);
 
 
 val prems = goal ZF.thy
@@ -54,6 +54,19 @@
 by (resolve_tac prems 1);
 
 
+Goal "[| a:A;  f: A->B;  g: C->D;  A Int C = 0 |] ==>  \
+\     (f Un g)`a = f`a";
+by (resolve_tac [apply_equality] 1);
+by (resolve_tac [UnI1] 1);
+by (resolve_tac [apply_Pair] 1);
+by (assume_tac 1);
+by (assume_tac 1);
+by (resolve_tac [fun_disjoint_Un] 1);
+by (assume_tac 1);
+by (assume_tac 1);
+by (assume_tac 1);
+
+
 
 
 goal ZF.thy "f``(UN x:A. B(x)) = (UN x:A. f``B(x))";
--- a/doc-src/Logics/ZF.tex	Thu Jul 16 10:35:31 1998 +0200
+++ b/doc-src/Logics/ZF.tex	Thu Jul 16 11:50:01 1998 +0200
@@ -195,31 +195,31 @@
   <$a@1$, $\ldots$, $a@{n-1}$, $a@n$> & 
         Pair($a@1$,\ldots,Pair($a@{n-1}$,$a@n$)\ldots) &
         \rm ordered $n$-tuple \\
-  \ttlbrace$x$:$A . P[x]$\ttrbrace    &  Collect($A$,$\lambda x.P[x]$) &
+  \ttlbrace$x$:$A . P[x]$\ttrbrace    &  Collect($A$,$\lambda x. P[x]$) &
         \rm separation \\
-  \ttlbrace$y . x$:$A$, $Q[x,y]$\ttrbrace  &  Replace($A$,$\lambda x\,y.Q[x,y]$) &
+  \ttlbrace$y . x$:$A$, $Q[x,y]$\ttrbrace  &  Replace($A$,$\lambda x\,y. Q[x,y]$) &
         \rm replacement \\
-  \ttlbrace$b[x] . x$:$A$\ttrbrace  &  RepFun($A$,$\lambda x.b[x]$) &
+  \ttlbrace$b[x] . x$:$A$\ttrbrace  &  RepFun($A$,$\lambda x. b[x]$) &
         \rm functional replacement \\
   \sdx{INT} $x$:$A . B[x]$      & Inter(\ttlbrace$B[x] . x$:$A$\ttrbrace) &
         \rm general intersection \\
   \sdx{UN}  $x$:$A . B[x]$      & Union(\ttlbrace$B[x] . x$:$A$\ttrbrace) &
         \rm general union \\
-  \sdx{PROD} $x$:$A . B[x]$     & Pi($A$,$\lambda x.B[x]$) & 
+  \sdx{PROD} $x$:$A . B[x]$     & Pi($A$,$\lambda x. B[x]$) & 
         \rm general product \\
-  \sdx{SUM}  $x$:$A . B[x]$     & Sigma($A$,$\lambda x.B[x]$) & 
+  \sdx{SUM}  $x$:$A . B[x]$     & Sigma($A$,$\lambda x. B[x]$) & 
         \rm general sum \\
-  $A$ -> $B$            & Pi($A$,$\lambda x.B$) & 
+  $A$ -> $B$            & Pi($A$,$\lambda x. B$) & 
         \rm function space \\
-  $A$ * $B$             & Sigma($A$,$\lambda x.B$) & 
+  $A$ * $B$             & Sigma($A$,$\lambda x. B$) & 
         \rm binary product \\
-  \sdx{THE}  $x . P[x]$ & The($\lambda x.P[x]$) & 
+  \sdx{THE}  $x . P[x]$ & The($\lambda x. P[x]$) & 
         \rm definite description \\
-  \sdx{lam}  $x$:$A . b[x]$     & Lambda($A$,$\lambda x.b[x]$) & 
+  \sdx{lam}  $x$:$A . b[x]$     & Lambda($A$,$\lambda x. b[x]$) & 
         \rm $\lambda$-abstraction\\[1ex]
-  \sdx{ALL} $x$:$A . P[x]$      & Ball($A$,$\lambda x.P[x]$) & 
+  \sdx{ALL} $x$:$A . P[x]$      & Ball($A$,$\lambda x. P[x]$) & 
         \rm bounded $\forall$ \\
-  \sdx{EX}  $x$:$A . P[x]$      & Bex($A$,$\lambda x.P[x]$) & 
+  \sdx{EX}  $x$:$A . P[x]$      & Bex($A$,$\lambda x. P[x]$) & 
         \rm bounded $\exists$
 \end{tabular}
 \end{center}
@@ -278,17 +278,17 @@
 \section{Binding operators}
 The constant \cdx{Collect} constructs sets by the principle of {\bf
   separation}.  The syntax for separation is
-\hbox{\tt\ttlbrace$x$:$A$.$P[x]$\ttrbrace}, where $P[x]$ is a formula
+\hbox{\tt\ttlbrace$x$:$A$. $P[x]$\ttrbrace}, where $P[x]$ is a formula
 that may contain free occurrences of~$x$.  It abbreviates the set {\tt
-  Collect($A$,$\lambda x.P[x]$)}, which consists of all $x\in A$ that
+  Collect($A$,$\lambda x. P[x]$)}, which consists of all $x\in A$ that
 satisfy~$P[x]$.  Note that \texttt{Collect} is an unfortunate choice of
 name: some set theories adopt a set-formation principle, related to
 replacement, called collection.
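For instance, \hbox{\tt\ttlbrace$x$:$A$. $x$:$B$\ttrbrace} abbreviates
{\tt Collect($A$,$\lambda x. x\in B$)} and denotes simply the
intersection $A\int B$.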
 
 The constant \cdx{Replace} constructs sets by the principle of {\bf
   replacement}.  The syntax
-\hbox{\tt\ttlbrace$y$.$x$:$A$,$Q[x,y]$\ttrbrace} denotes the set {\tt
-  Replace($A$,$\lambda x\,y.Q[x,y]$)}, which consists of all~$y$ such
+\hbox{\tt\ttlbrace$y$. $x$:$A$,$Q[x,y]$\ttrbrace} denotes the set {\tt
+  Replace($A$,$\lambda x\,y. Q[x,y]$)}, which consists of all~$y$ such
 that there exists $x\in A$ satisfying~$Q[x,y]$.  The Replacement Axiom
 has the condition that $Q$ must be single-valued over~$A$: for
 all~$x\in A$ there exists at most one $y$ satisfying~$Q[x,y]$.  A
@@ -297,16 +297,16 @@
 The constant \cdx{RepFun} expresses a special case of replacement,
 where $Q[x,y]$ has the form $y=b[x]$.  Such a $Q$ is trivially
 single-valued, since it is just the graph of the meta-level
-function~$\lambda x.b[x]$.  The resulting set consists of all $b[x]$
+function~$\lambda x. b[x]$.  The resulting set consists of all $b[x]$
 for~$x\in A$.  This is analogous to the \ML{} functional \texttt{map},
 since it applies a function to every element of a set.  The syntax is
-\hbox{\tt\ttlbrace$b[x]$.$x$:$A$\ttrbrace}, which expands to {\tt
-  RepFun($A$,$\lambda x.b[x]$)}.
+\hbox{\tt\ttlbrace$b[x]$. $x$:$A$\ttrbrace}, which expands to {\tt
+  RepFun($A$,$\lambda x. b[x]$)}.
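For instance, \hbox{\tt\ttlbrace succ($x$). $x$:$A$\ttrbrace} denotes the set
of successors of elements of~$A$, that is, the image of~$A$ under
\texttt{succ}.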
 
 \index{*INT symbol}\index{*UN symbol} 
 General unions and intersections of indexed
 families of sets, namely $\bigcup@{x\in A}B[x]$ and $\bigcap@{x\in A}B[x]$,
-are written \hbox{\tt UN $x$:$A$.$B[x]$} and \hbox{\tt INT $x$:$A$.$B[x]$}.
+are written \hbox{\tt UN $x$:$A$. $B[x]$} and \hbox{\tt INT $x$:$A$. $B[x]$}.
 Their meaning is expressed using \texttt{RepFun} as
 \[
 \bigcup(\{B[x]. x\in A\}) \qquad\hbox{and}\qquad 
@@ -319,7 +319,7 @@
 has `dependent sets') and calls for similar syntactic conventions.  The
 constants~\cdx{Sigma} and~\cdx{Pi} construct general sums and
 products.  Instead of \texttt{Sigma($A$,$B$)} and \texttt{Pi($A$,$B$)} we may write
-\hbox{\tt SUM $x$:$A$.$B[x]$} and \hbox{\tt PROD $x$:$A$.$B[x]$}.  
+\hbox{\tt SUM $x$:$A$. $B[x]$} and \hbox{\tt PROD $x$:$A$. $B[x]$}.  
 \index{*SUM symbol}\index{*PROD symbol}%
 The special cases as \hbox{\tt$A$*$B$} and \hbox{\tt$A$->$B$} abbreviate
 general sums and products over a constant family.\footnote{Unlike normal
@@ -331,28 +331,28 @@
 As mentioned above, whenever the axioms assert the existence and uniqueness
 of a set, Isabelle's set theory declares a constant for that set.  These
 constants can express the {\bf definite description} operator~$\iota
-x.P[x]$, which stands for the unique~$a$ satisfying~$P[a]$, if such exists.
+x. P[x]$, which stands for the unique~$a$ satisfying~$P[a]$, if such exists.
 Since all terms in {\ZF} denote something, a description is always
 meaningful, but we do not know its value unless $P[x]$ defines it uniquely.
 Using the constant~\cdx{The}, we may write descriptions as {\tt
-  The($\lambda x.P[x]$)} or use the syntax \hbox{\tt THE $x$.$P[x]$}.
+  The($\lambda x. P[x]$)} or use the syntax \hbox{\tt THE $x$. $P[x]$}.
 
 \index{*lam symbol}
-Function sets may be written in $\lambda$-notation; $\lambda x\in A.b[x]$
+Function sets may be written in $\lambda$-notation; $\lambda x\in A. b[x]$
 stands for the set of all pairs $\pair{x,b[x]}$ for $x\in A$.  In order for
 this to be a set, the function's domain~$A$ must be given.  Using the
 constant~\cdx{Lambda}, we may express function sets as {\tt
-Lambda($A$,$\lambda x.b[x]$)} or use the syntax \hbox{\tt lam $x$:$A$.$b[x]$}.
+Lambda($A$,$\lambda x. b[x]$)} or use the syntax \hbox{\tt lam $x$:$A$. $b[x]$}.
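For instance, \hbox{\tt lam $x$:nat. succ($x$)} denotes the set of pairs
$\pair{x,\hbox{\tt succ}(x)}$ for $x\in\hbox{\tt nat}$; it belongs to the
function space \hbox{\tt nat->nat}.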
 
 Isabelle's set theory defines two {\bf bounded quantifiers}:
 \begin{eqnarray*}
-   \forall x\in A.P[x] &\hbox{abbreviates}& \forall x. x\in A\imp P[x] \\
-   \exists x\in A.P[x] &\hbox{abbreviates}& \exists x. x\in A\conj P[x]
+   \forall x\in A. P[x] &\hbox{abbreviates}& \forall x. x\in A\imp P[x] \\
+   \exists x\in A. P[x] &\hbox{abbreviates}& \exists x. x\in A\conj P[x]
 \end{eqnarray*}
 The constants~\cdx{Ball} and~\cdx{Bex} are defined
 accordingly.  Instead of \texttt{Ball($A$,$P$)} and \texttt{Bex($A$,$P$)} we may
 write
-\hbox{\tt ALL $x$:$A$.$P[x]$} and \hbox{\tt EX $x$:$A$.$P[x]$}.
+\hbox{\tt ALL $x$:$A$. $P[x]$} and \hbox{\tt EX $x$:$A$. $P[x]$}.
 
 
 %%%% ZF.thy
@@ -376,7 +376,7 @@
 \subcaption{The Zermelo-Fraenkel Axioms}
 
 \tdx{Replace_def}  Replace(A,P) == 
-                   PrimReplace(A, \%x y. (EX!z.P(x,z)) & P(x,y))
+                   PrimReplace(A, \%x y. (EX!z. P(x,z)) & P(x,y))
 \tdx{RepFun_def}   RepFun(A,f)  == {\ttlbrace}y . x:A, y=f(x)\ttrbrace
 \tdx{the_def}      The(P)       == Union({\ttlbrace}y . x:{\ttlbrace}0{\ttrbrace}, P(y){\ttrbrace})
 \tdx{if_def}       if(P,a,b)    == THE z. P & z=a | ~P & z=b
@@ -404,8 +404,8 @@
 
 \tdx{Pair_def}       <a,b>      == {\ttlbrace}{\ttlbrace}a,a{\ttrbrace}, {\ttlbrace}a,b{\ttrbrace}{\ttrbrace}
 \tdx{split_def}      split(c,p) == THE y. EX a b. p=<a,b> & y=c(a,b)
-\tdx{fst_def}        fst(A)     == split(\%x y.x, p)
-\tdx{snd_def}        snd(A)     == split(\%x y.y, p)
+\tdx{fst_def}        fst(p)     == split(\%x y. x, p)
+\tdx{snd_def}        snd(p)     == split(\%x y. y, p)
 \tdx{Sigma_def}      Sigma(A,B) == UN x:A. UN y:B(x). {\ttlbrace}<x,y>{\ttrbrace}
 \subcaption{Ordered pairs and Cartesian products}
 
@@ -420,7 +420,7 @@
 \tdx{lam_def}    Lambda(A,b) == {\ttlbrace}<x,b(x)> . x:A{\ttrbrace}
 \tdx{apply_def}  f`a         == THE y. <a,y> : f
 \tdx{Pi_def}     Pi(A,B) == {\ttlbrace}f: Pow(Sigma(A,B)). ALL x:A. EX! y. <x,y>: f{\ttrbrace}
-\tdx{restrict_def}   restrict(f,A) == lam x:A.f`x
+\tdx{restrict_def}   restrict(f,A) == lam x:A. f`x
 \subcaption{Functions and general product}
 \end{ttbox}
 \caption{Further definitions of {\ZF}} \label{zf-defs}
@@ -441,7 +441,7 @@
 subject to the condition that $P(x,y)$ is single-valued for all~$x\in A$.
 The Isabelle theory defines \cdx{Replace} to apply
 \cdx{PrimReplace} to the single-valued part of~$P$, namely
-\[ (\exists!z.P(x,z)) \conj P(x,y). \]
+\[ (\exists!z. P(x,z)) \conj P(x,y). \]
 Thus $y\in \texttt{Replace}(A,P)$ if and only if there is some~$x$ such that
 $P(x,-)$ holds uniquely for~$y$.  Because the equivalence is unconditional,
 \texttt{Replace} is much easier to use than \texttt{PrimReplace}; it defines the
@@ -493,14 +493,14 @@
             (ALL x:A. P(x)) <-> (ALL x:A'. P'(x))
 
 \tdx{bexI}        [| P(x);  x: A |] ==> EX x:A. P(x)
-\tdx{bexCI}       [| ALL x:A. ~P(x) ==> P(a);  a: A |] ==> EX x:A.P(x)
+\tdx{bexCI}       [| ALL x:A. ~P(x) ==> P(a);  a: A |] ==> EX x:A. P(x)
 \tdx{bexE}        [| EX x:A. P(x);  !!x. [| x:A; P(x) |] ==> Q |] ==> Q
 
 \tdx{bex_cong}    [| A=A';  !!x. x:A' ==> P(x) <-> P'(x) |] ==> 
             (EX x:A. P(x)) <-> (EX x:A'. P'(x))
 \subcaption{Bounded quantifiers}
 
-\tdx{subsetI}       (!!x.x:A ==> x:B) ==> A <= B
+\tdx{subsetI}       (!!x. x:A ==> x:B) ==> A <= B
 \tdx{subsetD}       [| A <= B;  c:A |] ==> c:B
 \tdx{subsetCE}      [| A <= B;  ~(c:A) ==> P;  c:B ==> P |] ==> P
 \tdx{subset_refl}   A <= A
@@ -768,7 +768,7 @@
 
 \tdx{fst_conv}        fst(<a,b>) = a
 \tdx{snd_conv}        snd(<a,b>) = b
-\tdx{split}           split(\%x y.c(x,y), <a,b>) = c(a,b)
+\tdx{split}           split(\%x y. c(x,y), <a,b>) = c(a,b)
 
 \tdx{SigmaI}          [| a:A;  b:B(a) |] ==> <a,b> : Sigma(A,B)
 
@@ -803,12 +803,12 @@
 
 In addition, it is possible to use tuples as patterns in abstractions:
 \begin{center}
-{\tt\%<$x$,$y$>.$t$} \quad stands for\quad \texttt{split(\%$x$ $y$.$t$)}
+{\tt\%<$x$,$y$>. $t$} \quad stands for\quad \texttt{split(\%$x$ $y$. $t$)}
 \end{center}
 Nested patterns are translated recursively:
-{\tt\%<$x$,$y$,$z$>.$t$} $\leadsto$ {\tt\%<$x$,<$y$,$z$>>.$t$} $\leadsto$
-\texttt{split(\%$x$.\%<$y$,$z$>.$t$)} $\leadsto$ \texttt{split(\%$x$.split(\%$y$
-  $z$.$t$))}.  The reverse translation is performed upon printing.
+{\tt\%<$x$,$y$,$z$>. $t$} $\leadsto$ {\tt\%<$x$,<$y$,$z$>>. $t$} $\leadsto$
+\texttt{split(\%$x$. \%<$y$,$z$>. $t$)} $\leadsto$ \texttt{split(\%$x$. split(\%$y$
+  $z$. $t$))}.  The reverse translation is performed upon printing.
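For instance, the function that swaps the components of a pair may be written
{\tt\%<$x$,$y$>. <$y$,$x$>}, which the parser translates to
\texttt{split(\%$x$ $y$. <$y$,$x$>)}.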
 \begin{warn}
   The translation between patterns and \texttt{split} is performed automatically
   by the parser and printer.  Thus the internal and external form of a term
@@ -918,9 +918,9 @@
 \tdx{lamE}         [| p: (lam x:A. b(x));  !!x.[| x:A; p=<x,b(x)> |] ==> P 
              |] ==>  P
 
-\tdx{lam_type}     [| !!x. x:A ==> b(x): B(x) |] ==> (lam x:A.b(x)) : Pi(A,B)
+\tdx{lam_type}     [| !!x. x:A ==> b(x): B(x) |] ==> (lam x:A. b(x)) : Pi(A,B)
 
-\tdx{beta}         a : A ==> (lam x:A.b(x)) ` a = b(a)
+\tdx{beta}         a : A ==> (lam x:A. b(x)) ` a = b(a)
 \tdx{eta}          f : Pi(A,B) ==> (lam x:A. f`x) = f
 \end{ttbox}
 \caption{$\lambda$-abstraction} \label{zf-lam}
@@ -1274,8 +1274,8 @@
 \tdx{rec_def}       rec(k,a,b) ==  
               transrec(k, \%n f. nat_case(a, \%m. b(m, f`m), n))
 
-\tdx{add_def}       m#+n    == rec(m, n, \%u v.succ(v))
-\tdx{diff_def}      m#-n    == rec(n, m, \%u v. rec(v, 0, \%x y.x))
+\tdx{add_def}       m#+n    == rec(m, n, \%u v. succ(v))
+\tdx{diff_def}      m#-n    == rec(n, m, \%u v. rec(v, 0, \%x y. x))
 \tdx{mult_def}      m#*n    == rec(m, 0, \%u v. n #+ v)
 \tdx{mod_def}       m mod n == transrec(m, \%j f. if(j:n, j, f`(j#-n)))
 \tdx{div_def}       m div n == transrec(m, \%j f. if(j:n, 0, succ(f`(j#-n))))
@@ -1375,7 +1375,7 @@
 \underscoreon %%because @ is used here
 \begin{ttbox}
 \tdx{list_rec_def}    list_rec(l,c,h) == 
-                Vrec(l, \%l g.list_case(c, \%x xs. h(x, xs, g`xs), l))
+                Vrec(l, \%l g. list_case(c, \%x xs. h(x, xs, g`xs), l))
 
 \tdx{map_def}         map(f,l)  == list_rec(l,  0,  \%x xs r. <f(x), r>)
 \tdx{length_def}      length(l) == list_rec(l,  0,  \%x xs r. succ(r))
@@ -1401,8 +1401,8 @@
 \tdx{list_rec_Nil}    list_rec(Nil,c,h) = c
 \tdx{list_rec_Cons}   list_rec(Cons(a,l), c, h) = h(a, l, list_rec(l,c,h))
 
-\tdx{map_ident}       l: list(A) ==> map(\%u.u, l) = l
-\tdx{map_compose}     l: list(A) ==> map(h, map(j,l)) = map(\%u.h(j(u)), l)
+\tdx{map_ident}       l: list(A) ==> map(\%u. u, l) = l
+\tdx{map_compose}     l: list(A) ==> map(h, map(j,l)) = map(\%u. h(j(u)), l)
 \tdx{map_app_distrib} xs: list(A) ==> map(h, xs@ys) = map(h,xs) @ map(h,ys)
 \tdx{map_type}
     [| l: list(A);  !!x. x: A ==> h(x): B |] ==> map(h,l) : list(B)
@@ -1505,8 +1505,8 @@
 
 The extraction of rewrite rules takes set theory primitives into account.
 It can strip bounded universal quantifiers from a formula; for example,
-${\forall x\in A.f(x)=g(x)}$ yields the conditional rewrite rule $x\in A \Imp
-f(x)=g(x)$.  Given $a\in\{x\in A.P(x)\}$ it extracts rewrite rules from
+${\forall x\in A. f(x)=g(x)}$ yields the conditional rewrite rule $x\in A \Imp
+f(x)=g(x)$.  Given $a\in\{x\in A. P(x)\}$ it extracts rewrite rules from
 $a\in A$ and~$P(a)$.  It can also break down $a\in A\int B$ and $a\in A-B$.
 
 The default simplification set contains congruence rules for
@@ -1611,7 +1611,7 @@
 We enter the goal and make the first step, which breaks the equation into
 two inclusions by extensionality:\index{*equalityI theorem}
 \begin{ttbox}
-goal thy "Pow(A Int B) = Pow(A) Int Pow(B)";
+Goal "Pow(A Int B) = Pow(A) Int Pow(B)";
 {\out Level 0}
 {\out Pow(A Int B) = Pow(A) Int Pow(B)}
 {\out  1. Pow(A Int B) = Pow(A) Int Pow(B)}
@@ -1725,16 +1725,15 @@
 ${C\subseteq D}$ implies $\bigcup(C)\subseteq \bigcup(D)$.  To begin, we
 tackle the inclusion using \tdx{subsetI}:
 \begin{ttbox}
-val [prem] = goal thy "C<=D ==> Union(C) <= Union(D)";
+Goal "C<=D ==> Union(C) <= Union(D)";
 {\out Level 0}
-{\out Union(C) <= Union(D)}
-{\out  1. Union(C) <= Union(D)}
-{\out val prem = "C <= D  [C <= D]" : thm}
+{\out C <= D ==> Union(C) <= Union(D)}
+{\out  1. C <= D ==> Union(C) <= Union(D)}
 \ttbreak
 by (resolve_tac [subsetI] 1);
 {\out Level 1}
-{\out Union(C) <= Union(D)}
-{\out  1. !!x. x : Union(C) ==> x : Union(D)}
+{\out C <= D ==> Union(C) <= Union(D)}
+{\out  1. !!x. [| C <= D; x : Union(C) |] ==> x : Union(D)}
 \end{ttbox}
 Big union is like an existential quantifier --- the occurrence in the
 assumptions must be eliminated early, since it creates parameters.
@@ -1742,8 +1741,8 @@
 \begin{ttbox}
 by (eresolve_tac [UnionE] 1);
 {\out Level 2}
-{\out Union(C) <= Union(D)}
-{\out  1. !!x B. [| x : B; B : C |] ==> x : Union(D)}
+{\out C <= D ==> Union(C) <= Union(D)}
+{\out  1. !!x B. [| C <= D; x : B; B : C |] ==> x : Union(D)}
 \end{ttbox}
 Now we may apply \tdx{UnionI}, which creates an unknown involving the
 parameters.  To show $x\in \bigcup(D)$ it suffices to show that $x$ belongs
@@ -1751,58 +1750,48 @@
 \begin{ttbox}
 by (resolve_tac [UnionI] 1);
 {\out Level 3}
-{\out Union(C) <= Union(D)}
-{\out  1. !!x B. [| x : B; B : C |] ==> ?B2(x,B) : D}
-{\out  2. !!x B. [| x : B; B : C |] ==> x : ?B2(x,B)}
+{\out C <= D ==> Union(C) <= Union(D)}
+{\out  1. !!x B. [| C <= D; x : B; B : C |] ==> ?B2(x,B) : D}
+{\out  2. !!x B. [| C <= D; x : B; B : C |] ==> x : ?B2(x,B)}
 \end{ttbox}
-Combining \tdx{subsetD} with the premise $C\subseteq D$ yields 
-$\Var{a}\in C \Imp \Var{a}\in D$, which reduces subgoal~1:
+Combining \tdx{subsetD} with the assumption $C\subseteq D$ yields 
+$\Var{a}\in C \Imp \Var{a}\in D$, which reduces subgoal~1.  Note that
+\texttt{eresolve_tac} has removed that assumption.
 \begin{ttbox}
-by (resolve_tac [prem RS subsetD] 1);
+by (eresolve_tac [subsetD] 1);
 {\out Level 4}
-{\out Union(C) <= Union(D)}
+{\out C <= D ==> Union(C) <= Union(D)}
 {\out  1. !!x B. [| x : B; B : C |] ==> ?B2(x,B) : C}
-{\out  2. !!x B. [| x : B; B : C |] ==> x : ?B2(x,B)}
+{\out  2. !!x B. [| C <= D; x : B; B : C |] ==> x : ?B2(x,B)}
 \end{ttbox}
-The rest is routine.  Note how~$\Var{B2}(x,B)$ is instantiated.
+The rest is routine.  Observe how~$\Var{B2}(x,B)$ is instantiated.
 \begin{ttbox}
 by (assume_tac 1);
 {\out Level 5}
-{\out Union(C) <= Union(D)}
-{\out  1. !!x B. [| x : B; B : C |] ==> x : B}
+{\out C <= D ==> Union(C) <= Union(D)}
+{\out  1. !!x B. [| C <= D; x : B; B : C |] ==> x : B}
 by (assume_tac 1);
 {\out Level 6}
-{\out Union(C) <= Union(D)}
+{\out C <= D ==> Union(C) <= Union(D)}
 {\out No subgoals!}
 \end{ttbox}
-Again, \ttindex{Blast_tac} can prove the theorem in one
-step, provided we somehow supply it with~\texttt{prem}.  We can add
-\hbox{\tt prem RS subsetD} to the claset as an introduction rule:
+Again, \ttindex{Blast_tac} can prove the theorem in one step.
 \begin{ttbox}
-by (blast_tac (claset() addIs [prem RS subsetD]) 1);
+by (Blast_tac 1);
 {\out Depth = 0}
 {\out Depth = 1}
 {\out Depth = 2}
 {\out Level 1}
-{\out Union(C) <= Union(D)}
+{\out C <= D ==> Union(C) <= Union(D)}
 {\out No subgoals!}
 \end{ttbox}
-As an alternative, we could add premise to the assumptions, either using
-\ttindex{cut_facts_tac} or by stating the original goal using~\texttt{!!}:
-\begin{ttbox}
-goal thy "!!C D. C<=D ==> Union(C) <= Union(D)";
-{\out Level 0}
-{\out Union(C) <= Union(D)}
-{\out  1. !!C D. C <= D ==> Union(C) <= Union(D)}
-by (Blast_tac 1);
-\end{ttbox}
 
 The file \texttt{ZF/equalities.ML} has many similar proofs.  Reasoning about
 general intersection can be difficult because of its anomalous behaviour on
 the empty set.  However, \ttindex{Blast_tac} copes well with these.  Here is
 a typical example, borrowed from Devlin~\cite[page 12]{devlin79}:
 \begin{ttbox}
-a:C ==> (INT x:C. A(x) Int B(x)) = (INT x:C.A(x)) Int (INT x:C.B(x))
+a:C ==> (INT x:C. A(x) Int B(x)) = (INT x:C. A(x)) Int (INT x:C. B(x))
 \end{ttbox}
 In traditional notation this is
 \[ a\in C \,\Imp\, \inter@{x\in C} \Bigl(A(x) \int B(x)\Bigr) =        
@@ -1819,88 +1808,82 @@
 functions with disjoint domains~$A$ and~$C$, and if $a\in A$, then
 $(f\un g)`a = f`a$:
 \begin{ttbox}
-val prems = goal thy
-    "[| a:A;  f: A->B;  g: C->D;  A Int C = 0 |] ==>  \ttback
+Goal "[| a:A;  f: A->B;  g: C->D;  A Int C = 0 |] ==>  \ttback
 \ttback    (f Un g)`a = f`a";
 {\out Level 0}
-{\out (f Un g) ` a = f ` a}
-{\out  1. (f Un g) ` a = f ` a}
-\end{ttbox}
-Isabelle has produced the output above; the \ML{} top-level now echoes the
-binding of \texttt{prems}.
-\begin{ttbox}
-{\out val prems = ["a : A  [a : A]",}
-{\out              "f : A -> B  [f : A -> B]",}
-{\out              "g : C -> D  [g : C -> D]",}
-{\out              "A Int C = 0  [A Int C = 0]"] : thm list}
+{\out [| a : A; f : A -> B; g : C -> D; A Int C = 0 |]}
+{\out ==> (f Un g) ` a = f ` a}
+{\out  1. [| a : A; f : A -> B; g : C -> D; A Int C = 0 |]}
+{\out     ==> (f Un g) ` a = f ` a}
 \end{ttbox}
 Using \tdx{apply_equality}, we reduce the equality to reasoning about
 ordered pairs.  The second subgoal is to verify that $f\un g$ is a function.
+To save space, the assumptions will be abbreviated below.
 \begin{ttbox}
 by (resolve_tac [apply_equality] 1);
 {\out Level 1}
-{\out (f Un g) ` a = f ` a}
-{\out  1. <a,f ` a> : f Un g}
-{\out  2. f Un g : (PROD x:?A. ?B(x))}
+{\out [| \ldots |] ==> (f Un g) ` a = f ` a}
+{\out  1. [| \ldots |] ==> <a,f ` a> : f Un g}
+{\out  2. [| \ldots |] ==> f Un g : (PROD x:?A. ?B(x))}
 \end{ttbox}
 We must show that the pair belongs to~$f$ or~$g$; by~\tdx{UnI1} we
 choose~$f$:
 \begin{ttbox}
 by (resolve_tac [UnI1] 1);
 {\out Level 2}
-{\out (f Un g) ` a = f ` a}
-{\out  1. <a,f ` a> : f}
-{\out  2. f Un g : (PROD x:?A. ?B(x))}
+{\out [| \ldots |] ==> (f Un g) ` a = f ` a}
+{\out  1. [| \ldots |] ==> <a,f ` a> : f}
+{\out  2. [| \ldots |] ==> f Un g : (PROD x:?A. ?B(x))}
 \end{ttbox}
 To show $\pair{a,f`a}\in f$ we use \tdx{apply_Pair}, which is
 essentially the converse of \tdx{apply_equality}:
 \begin{ttbox}
 by (resolve_tac [apply_Pair] 1);
 {\out Level 3}
-{\out (f Un g) ` a = f ` a}
-{\out  1. f : (PROD x:?A2. ?B2(x))}
-{\out  2. a : ?A2}
-{\out  3. f Un g : (PROD x:?A. ?B(x))}
+{\out [| \ldots |] ==> (f Un g) ` a = f ` a}
+{\out  1. [| \ldots |] ==> f : (PROD x:?A2. ?B2(x))}
+{\out  2. [| \ldots |] ==> a : ?A2}
+{\out  3. [| \ldots |] ==> f Un g : (PROD x:?A. ?B(x))}
 \end{ttbox}
-Using the premises $f\in A\to B$ and $a\in A$, we solve the two subgoals
+Using the assumptions $f\in A\to B$ and $a\in A$, we solve the two subgoals
 from \tdx{apply_Pair}.  Recall that a $\Pi$-set is merely a generalized
 function space, and observe that~{\tt?A2} is instantiated to~\texttt{A}.
 \begin{ttbox}
-by (resolve_tac prems 1);
+by (assume_tac 1);
 {\out Level 4}
-{\out (f Un g) ` a = f ` a}
-{\out  1. a : A}
-{\out  2. f Un g : (PROD x:?A. ?B(x))}
-by (resolve_tac prems 1);
+{\out [| \ldots |] ==> (f Un g) ` a = f ` a}
+{\out  1. [| \ldots |] ==> a : A}
+{\out  2. [| \ldots |] ==> f Un g : (PROD x:?A. ?B(x))}
+by (assume_tac 1);
 {\out Level 5}
-{\out (f Un g) ` a = f ` a}
-{\out  1. f Un g : (PROD x:?A. ?B(x))}
+{\out [| \ldots |] ==> (f Un g) ` a = f ` a}
+{\out  1. [| \ldots |] ==> f Un g : (PROD x:?A. ?B(x))}
 \end{ttbox}
 To construct functions of the form $f\un g$, we apply
 \tdx{fun_disjoint_Un}:
 \begin{ttbox}
 by (resolve_tac [fun_disjoint_Un] 1);
 {\out Level 6}
-{\out (f Un g) ` a = f ` a}
-{\out  1. f : ?A3 -> ?B3}
-{\out  2. g : ?C3 -> ?D3}
-{\out  3. ?A3 Int ?C3 = 0}
+{\out [| \ldots |] ==> (f Un g) ` a = f ` a}
+{\out  1. [| \ldots |] ==> f : ?A3 -> ?B3}
+{\out  2. [| \ldots |] ==> g : ?C3 -> ?D3}
+{\out  3. [| \ldots |] ==> ?A3 Int ?C3 = 0}
 \end{ttbox}
-The remaining subgoals are instances of the premises.  Again, observe how
+The remaining subgoals are instances of the assumptions.  Again, observe how
 unknowns are instantiated:
 \begin{ttbox}
-by (resolve_tac prems 1);
+by (assume_tac 1);
 {\out Level 7}
-{\out (f Un g) ` a = f ` a}
-{\out  1. g : ?C3 -> ?D3}
-{\out  2. A Int ?C3 = 0}
-by (resolve_tac prems 1);
+{\out [| \ldots |] ==> (f Un g) ` a = f ` a}
+{\out  1. [| \ldots |] ==> g : ?C3 -> ?D3}
+{\out  2. [| \ldots |] ==> A Int ?C3 = 0}
+by (assume_tac 1);
 {\out Level 8}
-{\out (f Un g) ` a = f ` a}
-{\out  1. A Int C = 0}
-by (resolve_tac prems 1);
+{\out [| \ldots |] ==> (f Un g) ` a = f ` a}
+{\out  1. [| \ldots |] ==> A Int C = 0}
+by (assume_tac 1);
 {\out Level 9}
-{\out (f Un g) ` a = f ` a}
+{\out [| \ldots |] ==> (f Un g) ` a = f ` a}
 {\out No subgoals!}
 \end{ttbox}
 See the files \texttt{ZF/func.ML} and \texttt{ZF/WF.ML} for more