isar-strip-terminators;
author wenzelm
Sun Sep 17 22:19:02 2000 +0200 (2000-09-17)
changeset 10007 64bf7da1994a
parent 10006 ede5f78b9398
child 10008 61eb9f3aa92a
isar-strip-terminators;
src/HOL/AxClasses/Tutorial/Group.thy
src/HOL/AxClasses/Tutorial/Product.thy
src/HOL/AxClasses/Tutorial/Semigroups.thy
src/HOL/Isar_examples/BasicLogic.thy
src/HOL/Isar_examples/Cantor.thy
src/HOL/Isar_examples/ExprCompiler.thy
src/HOL/Isar_examples/Fibonacci.thy
src/HOL/Isar_examples/Group.thy
src/HOL/Isar_examples/KnasterTarski.thy
src/HOL/Isar_examples/MultisetOrder.thy
src/HOL/Isar_examples/MutilatedCheckerboard.thy
src/HOL/Isar_examples/NestedDatatype.thy
src/HOL/Isar_examples/Peirce.thy
src/HOL/Isar_examples/Puzzle.thy
src/HOL/Isar_examples/Summation.thy
src/HOL/Isar_examples/W_correct.thy
src/HOL/Real/HahnBanach/HahnBanachExtLemmas.thy
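
The patch strips the ';' terminators, which are no longer required, after Isar commands and proof steps throughout the theories listed above. As a minimal before/after sketch (taken verbatim from the BasicLogic.thy hunk below), a proof previously written as

    lemma I: "A --> A";
    proof;
      assume A;
      show A; by assumption;
    qed;

now reads

    lemma I: "A --> A"
    proof
      assume A
      show A by assumption
    qed
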
     1.1 --- a/src/HOL/AxClasses/Tutorial/Group.thy	Sun Sep 17 22:15:08 2000 +0200
     1.2 +++ b/src/HOL/AxClasses/Tutorial/Group.thy	Sun Sep 17 22:19:02 2000 +0200
     1.3 @@ -3,127 +3,127 @@
     1.4      Author:     Markus Wenzel, TU Muenchen
     1.5  *)
     1.6  
     1.7 -theory Group = Main:;
     1.8 +theory Group = Main:
     1.9  
    1.10 -subsection {* Monoids and Groups *};
    1.11 +subsection {* Monoids and Groups *}
    1.12  
    1.13  consts
    1.14    times :: "'a => 'a => 'a"    (infixl "[*]" 70)
    1.15    inverse :: "'a => 'a"
    1.16 -  one :: 'a;
    1.17 +  one :: 'a
    1.18  
    1.19  
    1.20  axclass
    1.21    monoid < "term"
    1.22    assoc:      "(x [*] y) [*] z = x [*] (y [*] z)"
    1.23    left_unit:  "one [*] x = x"
    1.24 -  right_unit: "x [*] one = x";
    1.25 +  right_unit: "x [*] one = x"
    1.26  
    1.27  
    1.28  axclass
    1.29    semigroup < "term"
    1.30 -  assoc: "(x [*] y) [*] z = x [*] (y [*] z)";
    1.31 +  assoc: "(x [*] y) [*] z = x [*] (y [*] z)"
    1.32  
    1.33  axclass
    1.34    group < semigroup
    1.35    left_unit:    "one [*] x = x"
    1.36 -  left_inverse: "inverse x [*] x = one";
    1.37 +  left_inverse: "inverse x [*] x = one"
    1.38  
    1.39  axclass
    1.40    agroup < group
    1.41 -  commute: "x [*] y = y [*] x";
    1.42 +  commute: "x [*] y = y [*] x"
    1.43  
    1.44  
    1.45 -subsection {* Abstract reasoning *};
    1.46 +subsection {* Abstract reasoning *}
    1.47  
    1.48 -theorem group_right_inverse: "x [*] inverse x = (one::'a::group)";
    1.49 -proof -;
    1.50 -  have "x [*] inverse x = one [*] (x [*] inverse x)";
    1.51 -    by (simp only: group.left_unit);
    1.52 -  also; have "... = one [*] x [*] inverse x";
    1.53 -    by (simp only: semigroup.assoc);
    1.54 -  also; have "... = inverse (inverse x) [*] inverse x [*] x [*] inverse x";
    1.55 -    by (simp only: group.left_inverse);
    1.56 -  also; have "... = inverse (inverse x) [*] (inverse x [*] x) [*] inverse x";
    1.57 -    by (simp only: semigroup.assoc);
    1.58 -  also; have "... = inverse (inverse x) [*] one [*] inverse x";
    1.59 -    by (simp only: group.left_inverse);
    1.60 -  also; have "... = inverse (inverse x) [*] (one [*] inverse x)";
    1.61 -    by (simp only: semigroup.assoc);
    1.62 -  also; have "... = inverse (inverse x) [*] inverse x";
    1.63 -    by (simp only: group.left_unit);
    1.64 -  also; have "... = one";
    1.65 -    by (simp only: group.left_inverse);
    1.66 -  finally; show ?thesis; .;
    1.67 -qed;
    1.68 +theorem group_right_inverse: "x [*] inverse x = (one::'a::group)"
    1.69 +proof -
    1.70 +  have "x [*] inverse x = one [*] (x [*] inverse x)"
    1.71 +    by (simp only: group.left_unit)
    1.72 +  also have "... = one [*] x [*] inverse x"
    1.73 +    by (simp only: semigroup.assoc)
    1.74 +  also have "... = inverse (inverse x) [*] inverse x [*] x [*] inverse x"
    1.75 +    by (simp only: group.left_inverse)
    1.76 +  also have "... = inverse (inverse x) [*] (inverse x [*] x) [*] inverse x"
    1.77 +    by (simp only: semigroup.assoc)
    1.78 +  also have "... = inverse (inverse x) [*] one [*] inverse x"
    1.79 +    by (simp only: group.left_inverse)
    1.80 +  also have "... = inverse (inverse x) [*] (one [*] inverse x)"
    1.81 +    by (simp only: semigroup.assoc)
    1.82 +  also have "... = inverse (inverse x) [*] inverse x"
    1.83 +    by (simp only: group.left_unit)
    1.84 +  also have "... = one"
    1.85 +    by (simp only: group.left_inverse)
    1.86 +  finally show ?thesis .
    1.87 +qed
    1.88  
    1.89 -theorem group_right_unit: "x [*] one = (x::'a::group)";
    1.90 -proof -;
    1.91 -  have "x [*] one = x [*] (inverse x [*] x)";
    1.92 -    by (simp only: group.left_inverse);
    1.93 -  also; have "... = x [*] inverse x [*] x";
    1.94 -    by (simp only: semigroup.assoc);
    1.95 -  also; have "... = one [*] x";
    1.96 -    by (simp only: group_right_inverse);
    1.97 -  also; have "... = x";
    1.98 -    by (simp only: group.left_unit);
    1.99 -  finally; show ?thesis; .;
   1.100 -qed;
   1.101 +theorem group_right_unit: "x [*] one = (x::'a::group)"
   1.102 +proof -
   1.103 +  have "x [*] one = x [*] (inverse x [*] x)"
   1.104 +    by (simp only: group.left_inverse)
   1.105 +  also have "... = x [*] inverse x [*] x"
   1.106 +    by (simp only: semigroup.assoc)
   1.107 +  also have "... = one [*] x"
   1.108 +    by (simp only: group_right_inverse)
   1.109 +  also have "... = x"
   1.110 +    by (simp only: group.left_unit)
   1.111 +  finally show ?thesis .
   1.112 +qed
   1.113  
   1.114  
   1.115 -subsection {* Abstract instantiation *};
   1.116 +subsection {* Abstract instantiation *}
   1.117  
   1.118 -instance monoid < semigroup;
   1.119 -proof intro_classes;
   1.120 -  fix x y z :: "'a::monoid";
   1.121 -  show "x [*] y [*] z = x [*] (y [*] z)";
   1.122 -    by (rule monoid.assoc);
   1.123 -qed;
   1.124 +instance monoid < semigroup
   1.125 +proof intro_classes
   1.126 +  fix x y z :: "'a::monoid"
   1.127 +  show "x [*] y [*] z = x [*] (y [*] z)"
   1.128 +    by (rule monoid.assoc)
   1.129 +qed
   1.130  
   1.131 -instance group < monoid;
   1.132 -proof intro_classes;
   1.133 -  fix x y z :: "'a::group";
   1.134 -  show "x [*] y [*] z = x [*] (y [*] z)";
   1.135 -    by (rule semigroup.assoc);
   1.136 -  show "one [*] x = x";
   1.137 -    by (rule group.left_unit);
   1.138 -  show "x [*] one = x";
   1.139 -    by (rule group_right_unit);
   1.140 -qed;
   1.141 +instance group < monoid
   1.142 +proof intro_classes
   1.143 +  fix x y z :: "'a::group"
   1.144 +  show "x [*] y [*] z = x [*] (y [*] z)"
   1.145 +    by (rule semigroup.assoc)
   1.146 +  show "one [*] x = x"
   1.147 +    by (rule group.left_unit)
   1.148 +  show "x [*] one = x"
   1.149 +    by (rule group_right_unit)
   1.150 +qed
   1.151  
   1.152  
   1.153 -subsection {* Concrete instantiation *};
   1.154 +subsection {* Concrete instantiation *}
   1.155  
   1.156  defs (overloaded)
   1.157    times_bool_def:   "x [*] y == x ~= (y::bool)"
   1.158    inverse_bool_def: "inverse x == x::bool"
   1.159 -  unit_bool_def:    "one == False";
   1.160 +  unit_bool_def:    "one == False"
   1.161  
   1.162 -instance bool :: agroup;
   1.163 +instance bool :: agroup
   1.164  proof (intro_classes,
   1.165 -    unfold times_bool_def inverse_bool_def unit_bool_def);
   1.166 -  fix x y z;
   1.167 -  show "((x ~= y) ~= z) = (x ~= (y ~= z))"; by blast;
   1.168 -  show "(False ~= x) = x"; by blast;
   1.169 -  show "(x ~= x) = False"; by blast;
   1.170 -  show "(x ~= y) = (y ~= x)"; by blast;
   1.171 -qed;
   1.172 +    unfold times_bool_def inverse_bool_def unit_bool_def)
   1.173 +  fix x y z
   1.174 +  show "((x ~= y) ~= z) = (x ~= (y ~= z))" by blast
   1.175 +  show "(False ~= x) = x" by blast
   1.176 +  show "(x ~= x) = False" by blast
   1.177 +  show "(x ~= y) = (y ~= x)" by blast
   1.178 +qed
   1.179  
   1.180  
   1.181 -subsection {* Lifting and Functors *};
   1.182 +subsection {* Lifting and Functors *}
   1.183  
   1.184  defs (overloaded)
   1.185 -  times_prod_def: "p [*] q == (fst p [*] fst q, snd p [*] snd q)";
   1.186 +  times_prod_def: "p [*] q == (fst p [*] fst q, snd p [*] snd q)"
   1.187  
   1.188 -instance * :: (semigroup, semigroup) semigroup;
   1.189 -proof (intro_classes, unfold times_prod_def);
   1.190 -  fix p q r :: "'a::semigroup * 'b::semigroup";
   1.191 +instance * :: (semigroup, semigroup) semigroup
   1.192 +proof (intro_classes, unfold times_prod_def)
   1.193 +  fix p q r :: "'a::semigroup * 'b::semigroup"
   1.194    show
   1.195      "(fst (fst p [*] fst q, snd p [*] snd q) [*] fst r,
   1.196        snd (fst p [*] fst q, snd p [*] snd q) [*] snd r) =
   1.197         (fst p [*] fst (fst q [*] fst r, snd q [*] snd r),
   1.198 -        snd p [*] snd (fst q [*] fst r, snd q [*] snd r))";
   1.199 -    by (simp add: semigroup.assoc);
   1.200 -qed;
   1.201 +        snd p [*] snd (fst q [*] fst r, snd q [*] snd r))"
   1.202 +    by (simp add: semigroup.assoc)
   1.203 +qed
   1.204  
   1.205 -end;
   1.206 +end
     2.1 --- a/src/HOL/AxClasses/Tutorial/Product.thy	Sun Sep 17 22:15:08 2000 +0200
     2.2 +++ b/src/HOL/AxClasses/Tutorial/Product.thy	Sun Sep 17 22:19:02 2000 +0200
     2.3 @@ -3,16 +3,16 @@
     2.4      Author:     Markus Wenzel, TU Muenchen
     2.5  *)
     2.6  
     2.7 -theory Product = Main:;
     2.8 +theory Product = Main:
     2.9  
    2.10  axclass
    2.11 -  product < "term";
    2.12 +  product < "term"
    2.13  consts
    2.14 -  product :: "'a::product => 'a => 'a"    (infixl "[*]" 70);
    2.15 +  product :: "'a::product => 'a => 'a"    (infixl "[*]" 70)
    2.16  
    2.17 -instance bool :: product;
    2.18 -  by intro_classes;
    2.19 +instance bool :: product
    2.20 +  by intro_classes
    2.21  defs (overloaded)
    2.22 -  product_bool_def: "x [*] y == x & y";
    2.23 +  product_bool_def: "x [*] y == x & y"
    2.24  
    2.25 -end;
    2.26 +end
     3.1 --- a/src/HOL/AxClasses/Tutorial/Semigroups.thy	Sun Sep 17 22:15:08 2000 +0200
     3.2 +++ b/src/HOL/AxClasses/Tutorial/Semigroups.thy	Sun Sep 17 22:19:02 2000 +0200
     3.3 @@ -3,18 +3,18 @@
     3.4      Author:     Markus Wenzel, TU Muenchen
     3.5  *)
     3.6  
     3.7 -theory Semigroups = Main:;
     3.8 +theory Semigroups = Main:
     3.9  
    3.10  consts
    3.11 -  times :: "'a => 'a => 'a"    (infixl "[*]" 70);
    3.12 +  times :: "'a => 'a => 'a"    (infixl "[*]" 70)
    3.13  axclass
    3.14    semigroup < "term"
    3.15 -  assoc: "(x [*] y) [*] z = x [*] (y [*] z)";
    3.16 +  assoc: "(x [*] y) [*] z = x [*] (y [*] z)"
    3.17  
    3.18  consts
    3.19 -  plus :: "'a => 'a => 'a"    (infixl "[+]" 70);
    3.20 +  plus :: "'a => 'a => 'a"    (infixl "[+]" 70)
    3.21  axclass
    3.22    plus_semigroup < "term"
    3.23 -  assoc: "(x [+] y) [+] z = x [+] (y [+] z)";
    3.24 +  assoc: "(x [+] y) [+] z = x [+] (y [+] z)"
    3.25  
    3.26 -end;
    3.27 +end
     4.1 --- a/src/HOL/Isar_examples/BasicLogic.thy	Sun Sep 17 22:15:08 2000 +0200
     4.2 +++ b/src/HOL/Isar_examples/BasicLogic.thy	Sun Sep 17 22:19:02 2000 +0200
     4.3 @@ -5,52 +5,52 @@
     4.4  Basic propositional and quantifier reasoning.
     4.5  *)
     4.6  
     4.7 -header {* Basic logical reasoning *};
     4.8 +header {* Basic logical reasoning *}
     4.9  
    4.10 -theory BasicLogic = Main:;
    4.11 +theory BasicLogic = Main:
    4.12  
    4.13  
    4.14 -subsection {* Pure backward reasoning *};
    4.15 +subsection {* Pure backward reasoning *}
    4.16  
    4.17  text {*
    4.18   In order to get a first idea of how Isabelle/Isar proof documents may
    4.19   look like, we consider the propositions $I$, $K$, and $S$.  The
    4.20   following (rather explicit) proofs should require little extra
    4.21   explanations.
    4.22 -*};
    4.23 +*}
    4.24  
    4.25 -lemma I: "A --> A";
    4.26 -proof;
    4.27 -  assume A;
    4.28 -  show A; by assumption;
    4.29 -qed;
    4.30 +lemma I: "A --> A"
    4.31 +proof
    4.32 +  assume A
    4.33 +  show A by assumption
    4.34 +qed
    4.35  
    4.36 -lemma K: "A --> B --> A";
    4.37 -proof;
    4.38 -  assume A;
    4.39 -  show "B --> A";
    4.40 -  proof;
    4.41 -    show A; by assumption;
    4.42 -  qed;
    4.43 -qed;
    4.44 +lemma K: "A --> B --> A"
    4.45 +proof
    4.46 +  assume A
    4.47 +  show "B --> A"
    4.48 +  proof
    4.49 +    show A by assumption
    4.50 +  qed
    4.51 +qed
    4.52  
    4.53 -lemma S: "(A --> B --> C) --> (A --> B) --> A --> C";
    4.54 -proof;
    4.55 -  assume "A --> B --> C";
    4.56 -  show "(A --> B) --> A --> C";
    4.57 -  proof;
    4.58 -    assume "A --> B";
    4.59 -    show "A --> C";
    4.60 -    proof;
    4.61 -      assume A;
    4.62 -      show C;
    4.63 -      proof (rule mp);
    4.64 -	show "B --> C"; by (rule mp);
    4.65 -        show B; by (rule mp);
    4.66 -      qed;
    4.67 -    qed;
    4.68 -  qed;
    4.69 -qed;
    4.70 +lemma S: "(A --> B --> C) --> (A --> B) --> A --> C"
    4.71 +proof
    4.72 +  assume "A --> B --> C"
    4.73 +  show "(A --> B) --> A --> C"
    4.74 +  proof
    4.75 +    assume "A --> B"
    4.76 +    show "A --> C"
    4.77 +    proof
    4.78 +      assume A
    4.79 +      show C
    4.80 +      proof (rule mp)
    4.81 +	show "B --> C" by (rule mp)
    4.82 +        show B by (rule mp)
    4.83 +      qed
    4.84 +    qed
    4.85 +  qed
    4.86 +qed
    4.87  
    4.88  text {*
    4.89   Isar provides several ways to fine-tune the reasoning, avoiding
    4.90 @@ -59,13 +59,13 @@
    4.91   way, even without referring to any automated proof tools yet.
    4.92  
    4.93   First of all, proof by assumption may be abbreviated as a single dot.
    4.94 -*};
    4.95 +*}
    4.96  
    4.97 -lemma "A --> A";
    4.98 -proof;
    4.99 -  assume A;
   4.100 -  show A; .;
   4.101 -qed;
   4.102 +lemma "A --> A"
   4.103 +proof
   4.104 +  assume A
   4.105 +  show A .
   4.106 +qed
   4.107  
   4.108  text {*
   4.109   In fact, concluding any (sub-)proof already involves solving any
   4.110 @@ -73,11 +73,11 @@
   4.111   trivial operation, as proof by assumption may involve full
   4.112   higher-order unification.}.  Thus we may skip the rather vacuous body
   4.113   of the above proof as well.
   4.114 -*};
   4.115 +*}
   4.116  
   4.117 -lemma "A --> A";
   4.118 -proof;
   4.119 -qed;
   4.120 +lemma "A --> A"
   4.121 +proof
   4.122 +qed
   4.123  
   4.124  text {*
   4.125   Note that the \isacommand{proof} command refers to the $\idt{rule}$
   4.126 @@ -85,22 +85,22 @@
   4.127   single rule, as determined from the syntactic form of the statements
   4.128   involved.  The \isacommand{by} command abbreviates any proof with
   4.129   empty body, so the proof may be further pruned.
   4.130 -*};
   4.131 +*}
   4.132  
   4.133 -lemma "A --> A";
   4.134 -  by rule;
   4.135 +lemma "A --> A"
   4.136 +  by rule
   4.137  
   4.138  text {*
   4.139   Proof by a single rule may be abbreviated as double-dot.
   4.140 -*};
   4.141 +*}
   4.142  
   4.143 -lemma "A --> A"; ..;
   4.144 +lemma "A --> A" ..
   4.145  
   4.146  text {*
   4.147   Thus we have arrived at an adequate representation of the proof of a
   4.148   tautology that holds by a single standard rule.\footnote{Apparently,
   4.149   the rule here is implication introduction.}
   4.150 -*};
   4.151 +*}
   4.152  
   4.153  text {*
   4.154   Let us also reconsider $K$.  Its statement is composed of iterated
   4.155 @@ -110,20 +110,20 @@
   4.156   The $\idt{intro}$ proof method repeatedly decomposes a goal's
   4.157   conclusion.\footnote{The dual method is $\idt{elim}$, acting on a
   4.158   goal's premises.}
   4.159 -*};
   4.160 +*}
   4.161  
   4.162 -lemma "A --> B --> A";
   4.163 -proof intro;
   4.164 -  assume A;
   4.165 -  show A; .;
   4.166 -qed;
   4.167 +lemma "A --> B --> A"
   4.168 +proof intro
   4.169 +  assume A
   4.170 +  show A .
   4.171 +qed
   4.172  
   4.173  text {*
   4.174   Again, the body may be collapsed.
   4.175 -*};
   4.176 +*}
   4.177  
   4.178 -lemma "A --> B --> A";
   4.179 -  by intro;
   4.180 +lemma "A --> B --> A"
   4.181 +  by intro
   4.182  
   4.183  text {*
   4.184   Just like $\idt{rule}$, the $\idt{intro}$ and $\idt{elim}$ proof
   4.185 @@ -140,10 +140,10 @@
   4.186   terminal steps that solve a goal completely are usually performed by
   4.187   actual automated proof methods (such as
   4.188   \isacommand{by}~$\idt{blast}$).
   4.189 -*};
   4.190 +*}
   4.191  
   4.192  
   4.193 -subsection {* Variations of backward vs.\ forward reasoning *};
   4.194 +subsection {* Variations of backward vs.\ forward reasoning *}
   4.195  
   4.196  text {*
   4.197   Certainly, any proof may be performed in backward-style only.  On the
   4.198 @@ -154,17 +154,17 @@
   4.199   A$.
   4.200  
   4.201   The first version is purely backward.
   4.202 -*};
   4.203 +*}
   4.204  
   4.205 -lemma "A & B --> B & A";
   4.206 -proof;
   4.207 -  assume "A & B";
   4.208 -  show "B & A";
   4.209 -  proof;
   4.210 -    show B; by (rule conjunct2);
   4.211 -    show A; by (rule conjunct1);
   4.212 -  qed;
   4.213 -qed;
   4.214 +lemma "A & B --> B & A"
   4.215 +proof
   4.216 +  assume "A & B"
   4.217 +  show "B & A"
   4.218 +  proof
   4.219 +    show B by (rule conjunct2)
   4.220 +    show A by (rule conjunct1)
   4.221 +  qed
   4.222 +qed
   4.223  
   4.224  text {*
   4.225   Above, the $\idt{conjunct}_{1/2}$ projection rules had to be named
   4.226 @@ -174,17 +174,17 @@
   4.227   current facts, enabling the use of double-dot proofs.  Note that
   4.228   \isacommand{from} already does forward-chaining, involving the
   4.229   \name{conjE} rule here.
   4.230 -*};
   4.231 +*}
   4.232  
   4.233 -lemma "A & B --> B & A";
   4.234 -proof;
   4.235 -  assume "A & B";
   4.236 -  show "B & A";
   4.237 -  proof;
   4.238 -    from prems; show B; ..;
   4.239 -    from prems; show A; ..;
   4.240 -  qed;
   4.241 -qed;
   4.242 +lemma "A & B --> B & A"
   4.243 +proof
   4.244 +  assume "A & B"
   4.245 +  show "B & A"
   4.246 +  proof
   4.247 +    from prems show B ..
   4.248 +    from prems show A ..
   4.249 +  qed
   4.250 +qed
   4.251  
   4.252  text {*
   4.253   In the next version, we move the forward step one level upwards.
   4.254 @@ -194,48 +194,48 @@
   4.255   introduction.  The resulting proof structure directly corresponds to
   4.256   that of the $\name{conjE}$ rule, including the repeated goal
   4.257   proposition that is abbreviated as $\var{thesis}$ below.
   4.258 -*};
   4.259 +*}
   4.260  
   4.261 -lemma "A & B --> B & A";
   4.262 -proof;
   4.263 -  assume "A & B";
   4.264 -  then; show "B & A";
   4.265 -  proof                    -- {* rule \name{conjE} of $A \conj B$ *};
   4.266 -    assume A B;
   4.267 -    show ?thesis; ..       -- {* rule \name{conjI} of $B \conj A$ *};
   4.268 -  qed;
   4.269 -qed;
   4.270 +lemma "A & B --> B & A"
   4.271 +proof
   4.272 +  assume "A & B"
   4.273 +  then show "B & A"
   4.274 +  proof                    -- {* rule \name{conjE} of $A \conj B$ *}
   4.275 +    assume A B
   4.276 +    show ?thesis ..       -- {* rule \name{conjI} of $B \conj A$ *}
   4.277 +  qed
   4.278 +qed
   4.279  
   4.280  text {*
   4.281   In the subsequent version we flatten the structure of the main body
   4.282   by doing forward reasoning all the time.  Only the outermost
   4.283   decomposition step is left as backward.
   4.284 -*};
   4.285 +*}
   4.286  
   4.287 -lemma "A & B --> B & A";
   4.288 -proof;
   4.289 -  assume ab: "A & B";
   4.290 -  from ab; have a: A; ..;
   4.291 -  from ab; have b: B; ..;
   4.292 -  from b a; show "B & A"; ..;
   4.293 -qed;
   4.294 +lemma "A & B --> B & A"
   4.295 +proof
   4.296 +  assume ab: "A & B"
   4.297 +  from ab have a: A ..
   4.298 +  from ab have b: B ..
   4.299 +  from b a show "B & A" ..
   4.300 +qed
   4.301  
   4.302  text {*
   4.303   We can still push forward-reasoning a bit further, even at the risk
   4.304   of getting ridiculous.  Note that we force the initial proof step to
   4.305   do nothing here, by referring to the ``-'' proof method.
   4.306 -*};
   4.307 +*}
   4.308  
   4.309 -lemma "A & B --> B & A";
   4.310 -proof -;
   4.311 -  {;
   4.312 -    assume ab: "A & B";
   4.313 -    from ab; have a: A; ..;
   4.314 -    from ab; have b: B; ..;
   4.315 -    from b a; have "B & A"; ..;
   4.316 -  };
   4.317 -  thus ?thesis; ..         -- {* rule \name{impI} *};
   4.318 -qed;
   4.319 +lemma "A & B --> B & A"
   4.320 +proof -
   4.321 +  {
   4.322 +    assume ab: "A & B"
   4.323 +    from ab have a: A ..
   4.324 +    from ab have b: B ..
   4.325 +    from b a have "B & A" ..
   4.326 +  }
   4.327 +  thus ?thesis ..         -- {* rule \name{impI} *}
   4.328 +qed
   4.329  
   4.330  text {*
   4.331   \medskip With these examples we have shifted through a whole range
   4.332 @@ -252,7 +252,7 @@
   4.333   etc., rules (and methods) on the one hand vs.\ facts on the other
   4.334   hand have to be emphasized in an appropriate way.  This requires the
   4.335   proof writer to develop good taste, and some practice, of course.
   4.336 -*};
   4.337 +*}
   4.338  
   4.339  text {*
   4.340   For our example the most appropriate way of reasoning is probably the
   4.341 @@ -261,47 +261,47 @@
   4.342   abbreviates \isacommand{then}~\isacommand{show}.\footnote{In the same
   4.343   vein, \isacommand{hence} abbreviates
   4.344   \isacommand{then}~\isacommand{have}.}
   4.345 -*};
   4.346 +*}
   4.347  
   4.348 -lemma "A & B --> B & A";
   4.349 -proof;
   4.350 -  assume "A & B";
   4.351 -  thus "B & A";
   4.352 -  proof;
   4.353 -    assume A B;
   4.354 -    show ?thesis; ..;
   4.355 -  qed;
   4.356 -qed;
   4.357 +lemma "A & B --> B & A"
   4.358 +proof
   4.359 +  assume "A & B"
   4.360 +  thus "B & A"
   4.361 +  proof
   4.362 +    assume A B
   4.363 +    show ?thesis ..
   4.364 +  qed
   4.365 +qed
   4.366  
   4.367  
   4.368  
   4.369 -subsection {* A few examples from ``Introduction to Isabelle'' *};
   4.370 +subsection {* A few examples from ``Introduction to Isabelle'' *}
   4.371  
   4.372  text {*
   4.373   We rephrase some of the basic reasoning examples of
   4.374   \cite{isabelle-intro}, using HOL rather than FOL.
   4.375 -*};
   4.376 +*}
   4.377  
   4.378 -subsubsection {* A propositional proof *};
   4.379 +subsubsection {* A propositional proof *}
   4.380  
   4.381  text {*
   4.382   We consider the proposition $P \disj P \impl P$.  The proof below
   4.383   involves forward-chaining from $P \disj P$, followed by an explicit
   4.384   case-analysis on the two \emph{identical} cases.
   4.385 -*};
   4.386 +*}
   4.387  
   4.388 -lemma "P | P --> P";
   4.389 -proof;
   4.390 -  assume "P | P";
   4.391 -  thus P;
   4.392 +lemma "P | P --> P"
   4.393 +proof
   4.394 +  assume "P | P"
   4.395 +  thus P
   4.396    proof                    -- {*
   4.397      rule \name{disjE}: \smash{$\infer{C}{A \disj B & \infer*{C}{[A]} & \infer*{C}{[B]}}$}
   4.398 -  *};
   4.399 -    assume P; show P; .;
   4.400 -  next;
   4.401 -    assume P; show P; .;
   4.402 -  qed;
   4.403 -qed;
   4.404 +  *}
   4.405 +    assume P show P .
   4.406 +  next
   4.407 +    assume P show P .
   4.408 +  qed
   4.409 +qed
   4.410  
   4.411  text {*
   4.412   Case splits are \emph{not} hardwired into the Isar language as a
   4.413 @@ -325,34 +325,34 @@
   4.414   \medskip In our example the situation is even simpler, since the two
   4.415   cases actually coincide.  Consequently the proof may be rephrased as
   4.416   follows.
   4.417 -*};
   4.418 +*}
   4.419  
   4.420 -lemma "P | P --> P";
   4.421 -proof;
   4.422 -  assume "P | P";
   4.423 -  thus P;
   4.424 -  proof;
   4.425 -    assume P;
   4.426 -    show P; .;
   4.427 -    show P; .;
   4.428 -  qed;
   4.429 -qed;
   4.430 +lemma "P | P --> P"
   4.431 +proof
   4.432 +  assume "P | P"
   4.433 +  thus P
   4.434 +  proof
   4.435 +    assume P
   4.436 +    show P .
   4.437 +    show P .
   4.438 +  qed
   4.439 +qed
   4.440  
   4.441  text {*
   4.442   Again, the rather vacuous body of the proof may be collapsed.  Thus
   4.443   the case analysis degenerates into two assumption steps, which are
   4.444   implicitly performed when concluding the single rule step of the
   4.445   double-dot proof as follows.
   4.446 -*};
   4.447 +*}
   4.448  
   4.449 -lemma "P | P --> P";
   4.450 -proof;
   4.451 -  assume "P | P";
   4.452 -  thus P; ..;
   4.453 -qed;
   4.454 +lemma "P | P --> P"
   4.455 +proof
   4.456 +  assume "P | P"
   4.457 +  thus P ..
   4.458 +qed
   4.459  
   4.460  
   4.461 -subsubsection {* A quantifier proof *};
   4.462 +subsubsection {* A quantifier proof *}
   4.463  
   4.464  text {*
   4.465   To illustrate quantifier reasoning, let us prove $(\ex x P \ap (f \ap
   4.466 @@ -366,20 +366,20 @@
   4.467   the \isacommand{fix} command augments the context by some new
   4.468   ``arbitrary, but fixed'' element; the \isacommand{is} annotation
   4.469   binds term abbreviations by higher-order pattern matching.
   4.470 -*};
   4.471 +*}
   4.472  
   4.473 -lemma "(EX x. P (f x)) --> (EX x. P x)";
   4.474 -proof;
   4.475 -  assume "EX x. P (f x)";
   4.476 -  thus "EX x. P x";
   4.477 +lemma "(EX x. P (f x)) --> (EX x. P x)"
   4.478 +proof
   4.479 +  assume "EX x. P (f x)"
   4.480 +  thus "EX x. P x"
   4.481    proof (rule exE)             -- {*
   4.482      rule \name{exE}: \smash{$\infer{B}{\ex x A(x) & \infer*{B}{[A(x)]_x}}$}
   4.483 -  *};
   4.484 -    fix a;
   4.485 -    assume "P (f a)" (is "P ?witness");
   4.486 -    show ?thesis; by (rule exI [of P ?witness]);
   4.487 -  qed;
   4.488 -qed;
   4.489 +  *}
   4.490 +    fix a
   4.491 +    assume "P (f a)" (is "P ?witness")
   4.492 +    show ?thesis by (rule exI [of P ?witness])
   4.493 +  qed
   4.494 +qed
   4.495  
   4.496  text {*
   4.497   While explicit rule instantiation may occasionally improve
   4.498 @@ -388,32 +388,32 @@
   4.499   structural clues for the system to infer both the rules and their
   4.500   instances (by higher-order unification).  Thus we may as well prune
   4.501   the text as follows.
   4.502 -*};
   4.503 +*}
   4.504  
   4.505 -lemma "(EX x. P (f x)) --> (EX x. P x)";
   4.506 -proof;
   4.507 -  assume "EX x. P (f x)";
   4.508 -  thus "EX x. P x";
   4.509 -  proof;
   4.510 -    fix a;
   4.511 -    assume "P (f a)";
   4.512 -    show ?thesis; ..;
   4.513 -  qed;
   4.514 -qed;
   4.515 +lemma "(EX x. P (f x)) --> (EX x. P x)"
   4.516 +proof
   4.517 +  assume "EX x. P (f x)"
   4.518 +  thus "EX x. P x"
   4.519 +  proof
   4.520 +    fix a
   4.521 +    assume "P (f a)"
   4.522 +    show ?thesis ..
   4.523 +  qed
   4.524 +qed
   4.525  
   4.526  text {*
   4.527   Explicit $\exists$-elimination as seen above can become quite
   4.528   cumbersome in practice.  The derived Isar language element
   4.529   ``\isakeyword{obtain}'' provides a more handsome way to do
   4.530   generalized existence reasoning.
   4.531 -*};
   4.532 +*}
   4.533  
   4.534 -lemma "(EX x. P (f x)) --> (EX x. P x)";
   4.535 -proof;
   4.536 -  assume "EX x. P (f x)";
   4.537 -  then; obtain a where "P (f a)"; by blast;
   4.538 -  thus "EX x. P x"; ..;
   4.539 -qed;
   4.540 +lemma "(EX x. P (f x)) --> (EX x. P x)"
   4.541 +proof
   4.542 +  assume "EX x. P (f x)"
   4.543 +  then obtain a where "P (f a)" by blast
   4.544 +  thus "EX x. P x" ..
   4.545 +qed
   4.546  
   4.547  text {*
   4.548   Technically, \isakeyword{obtain} is similar to \isakeyword{fix} and
   4.549 @@ -423,29 +423,29 @@
   4.550   reasoning involved here, any result exported from the context of an
   4.551   \isakeyword{obtain} statement may \emph{not} refer to the parameters
   4.552   introduced there.
   4.553 -*};
   4.554 +*}
   4.555  
   4.556  
   4.557  
   4.558 -subsubsection {* Deriving rules in Isabelle *};
   4.559 +subsubsection {* Deriving rules in Isabelle *}
   4.560  
   4.561  text {*
   4.562   We derive the conjunction elimination rule from the corresponding
   4.563   projections.  The proof is quite straight-forward, since
   4.564   Isabelle/Isar supports non-atomic goals and assumptions fully
   4.565   transparently.
   4.566 -*};
   4.567 +*}
   4.568  
   4.569 -theorem conjE: "A & B ==> (A ==> B ==> C) ==> C";
   4.570 -proof -;
   4.571 -  assume "A & B";
   4.572 -  assume r: "A ==> B ==> C";
   4.573 -  show C;
   4.574 -  proof (rule r);
   4.575 -    show A; by (rule conjunct1);
   4.576 -    show B; by (rule conjunct2);
   4.577 -  qed;
   4.578 -qed;
   4.579 +theorem conjE: "A & B ==> (A ==> B ==> C) ==> C"
   4.580 +proof -
   4.581 +  assume "A & B"
   4.582 +  assume r: "A ==> B ==> C"
   4.583 +  show C
   4.584 +  proof (rule r)
   4.585 +    show A by (rule conjunct1)
   4.586 +    show B by (rule conjunct2)
   4.587 +  qed
   4.588 +qed
   4.589  
   4.590  text {*
   4.591   Note that classic Isabelle handles higher rules in a slightly
   4.592 @@ -454,6 +454,6 @@
   4.593   \texttt{goal} command to decompose the rule into premises and
   4.594   conclusion.  The actual result would then emerge by discharging of
   4.595   the context at \texttt{qed} time.
   4.596 -*};
   4.597 +*}
   4.598  
   4.599 -end;
   4.600 +end
     5.1 --- a/src/HOL/Isar_examples/Cantor.thy	Sun Sep 17 22:15:08 2000 +0200
     5.2 +++ b/src/HOL/Isar_examples/Cantor.thy	Sun Sep 17 22:19:02 2000 +0200
     5.3 @@ -3,14 +3,14 @@
     5.4      Author:     Markus Wenzel, TU Muenchen
     5.5  *)
     5.6  
     5.7 -header {* Cantor's Theorem *};
     5.8 +header {* Cantor's Theorem *}
     5.9  
    5.10 -theory Cantor = Main:;
    5.11 +theory Cantor = Main:
    5.12  
    5.13  text_raw {*
    5.14   \footnote{This is an Isar version of the final example of the
    5.15   Isabelle/HOL manual \cite{isabelle-HOL}.}
    5.16 -*};
    5.17 +*}
    5.18  
    5.19  text {*
    5.20   Cantor's Theorem states that every set has more subsets than it has
    5.21 @@ -28,33 +28,33 @@
    5.22    
    5.23   \bigskip We first consider a slightly awkward version of the proof,
    5.24   with the innermost reasoning expressed quite naively.
    5.25 -*};
    5.26 +*}
    5.27  
    5.28 -theorem "EX S. S ~: range (f :: 'a => 'a set)";
    5.29 -proof;
    5.30 -  let ?S = "{x. x ~: f x}";
    5.31 -  show "?S ~: range f";
    5.32 -  proof;
    5.33 -    assume "?S : range f";
    5.34 -    thus False;
    5.35 -    proof;
    5.36 -      fix y; 
    5.37 -      assume "?S = f y";
    5.38 -      thus ?thesis;
    5.39 -      proof (rule equalityCE);
    5.40 -        assume in_S: "y : ?S";
    5.41 -        assume in_fy: "y : f y";
    5.42 -        from in_S; have notin_fy: "y ~: f y"; ..;
    5.43 -        from notin_fy in_fy; show ?thesis; by contradiction;
    5.44 -      next;
    5.45 -        assume notin_S: "y ~: ?S";
    5.46 -        assume notin_fy: "y ~: f y";
    5.47 -        from notin_S; have in_fy: "y : f y"; ..;
    5.48 -        from notin_fy in_fy; show ?thesis; by contradiction;
    5.49 -      qed;
    5.50 -    qed;
    5.51 -  qed;
    5.52 -qed;
    5.53 +theorem "EX S. S ~: range (f :: 'a => 'a set)"
    5.54 +proof
    5.55 +  let ?S = "{x. x ~: f x}"
    5.56 +  show "?S ~: range f"
    5.57 +  proof
    5.58 +    assume "?S : range f"
    5.59 +    thus False
    5.60 +    proof
    5.61 +      fix y 
    5.62 +      assume "?S = f y"
    5.63 +      thus ?thesis
    5.64 +      proof (rule equalityCE)
    5.65 +        assume in_S: "y : ?S"
    5.66 +        assume in_fy: "y : f y"
    5.67 +        from in_S have notin_fy: "y ~: f y" ..
    5.68 +        from notin_fy in_fy show ?thesis by contradiction
    5.69 +      next
    5.70 +        assume notin_S: "y ~: ?S"
    5.71 +        assume notin_fy: "y ~: f y"
    5.72 +        from notin_S have in_fy: "y : f y" ..
    5.73 +        from notin_fy in_fy show ?thesis by contradiction
    5.74 +      qed
    5.75 +    qed
    5.76 +  qed
    5.77 +qed
    5.78  
    5.79  text {*
    5.80   The following version of the proof essentially does the same
    5.81 @@ -67,31 +67,31 @@
    5.82   basic logical structure has to be left intact, though.  In
    5.83   particular, assumptions ``belonging'' to some goal have to be
    5.84   introduced \emph{before} its corresponding \isacommand{show}.}
    5.85 -*};
    5.86 +*}
    5.87  
    5.88 -theorem "EX S. S ~: range (f :: 'a => 'a set)";
    5.89 -proof;
    5.90 -  let ?S = "{x. x ~: f x}";
    5.91 -  show "?S ~: range f";
    5.92 -  proof;
    5.93 -    assume "?S : range f";
    5.94 -    thus False;
    5.95 -    proof;
    5.96 -      fix y; 
    5.97 -      assume "?S = f y";
    5.98 -      thus ?thesis;
    5.99 -      proof (rule equalityCE);
   5.100 -        assume "y : f y";
   5.101 -        assume "y : ?S"; hence "y ~: f y"; ..;
   5.102 -        thus ?thesis; by contradiction;
   5.103 -      next;
   5.104 -        assume "y ~: f y";
   5.105 -        assume "y ~: ?S"; hence "y : f y"; ..;
   5.106 -        thus ?thesis; by contradiction;
   5.107 -      qed;
   5.108 -    qed;
   5.109 -  qed;
   5.110 -qed;
   5.111 +theorem "EX S. S ~: range (f :: 'a => 'a set)"
   5.112 +proof
   5.113 +  let ?S = "{x. x ~: f x}"
   5.114 +  show "?S ~: range f"
   5.115 +  proof
   5.116 +    assume "?S : range f"
   5.117 +    thus False
   5.118 +    proof
   5.119 +      fix y 
   5.120 +      assume "?S = f y"
   5.121 +      thus ?thesis
   5.122 +      proof (rule equalityCE)
   5.123 +        assume "y : f y"
   5.124 +        assume "y : ?S" hence "y ~: f y" ..
   5.125 +        thus ?thesis by contradiction
   5.126 +      next
   5.127 +        assume "y ~: f y"
   5.128 +        assume "y ~: ?S" hence "y : f y" ..
   5.129 +        thus ?thesis by contradiction
   5.130 +      qed
   5.131 +    qed
   5.132 +  qed
   5.133 +qed
   5.134  
   5.135  text {*
   5.136   How much creativity is required?  As it happens, Isabelle can prove
   5.137 @@ -100,10 +100,10 @@
   5.138   through the large search space.  The context of Isabelle's classical
   5.139   prover contains rules for the relevant constructs of HOL's set
   5.140   theory.
   5.141 -*};
   5.142 +*}
   5.143  
   5.144 -theorem "EX S. S ~: range (f :: 'a => 'a set)";
   5.145 -  by best;
   5.146 +theorem "EX S. S ~: range (f :: 'a => 'a set)"
   5.147 +  by best
   5.148  
   5.149  text {*
   5.150   While this establishes the same theorem internally, we do not get any
   5.151 @@ -111,6 +111,6 @@
   5.152   transform internal system-level representations of Isabelle proofs
   5.153   back into Isar text.  Writing intelligible proof documents
   5.154   really is a creative process, after all.
   5.155 -*};
   5.156 +*}
   5.157  
   5.158 -end;
   5.159 +end
     6.1 --- a/src/HOL/Isar_examples/ExprCompiler.thy	Sun Sep 17 22:15:08 2000 +0200
     6.2 +++ b/src/HOL/Isar_examples/ExprCompiler.thy	Sun Sep 17 22:19:02 2000 +0200
     6.3 @@ -5,75 +5,75 @@
     6.4  Correctness of a simple expression/stack-machine compiler.
     6.5  *)
     6.6  
     6.7 -header {* Correctness of a simple expression compiler *};
     6.8 +header {* Correctness of a simple expression compiler *}
     6.9  
    6.10 -theory ExprCompiler = Main:;
    6.11 +theory ExprCompiler = Main:
    6.12  
    6.13  text {*
    6.14   This is a (rather trivial) example of program verification.  We model
    6.15   a compiler for translating expressions to stack machine instructions,
    6.16   and prove its correctness wrt.\ some evaluation semantics.
    6.17 -*};
    6.18 +*}
    6.19  
    6.20  
    6.21 -subsection {* Binary operations *};
    6.22 +subsection {* Binary operations *}
    6.23  
    6.24  text {*
    6.25   Binary operations are just functions over some type of values.  This
    6.26   is both for abstract syntax and semantics, i.e.\ we use a ``shallow
    6.27   embedding'' here.
    6.28 -*};
    6.29 +*}
    6.30  
    6.31  types
    6.32 -  'val binop = "'val => 'val => 'val";
    6.33 +  'val binop = "'val => 'val => 'val"
    6.34  
    6.35  
    6.36 -subsection {* Expressions *};
    6.37 +subsection {* Expressions *}
    6.38  
    6.39  text {*
    6.40   The language of expressions is defined as an inductive type,
    6.41   consisting of variables, constants, and binary operations on
    6.42   expressions.
    6.43 -*};
    6.44 +*}
    6.45  
    6.46  datatype ('adr, 'val) expr =
    6.47    Variable 'adr |
    6.48    Constant 'val |
    6.49 -  Binop "'val binop" "('adr, 'val) expr" "('adr, 'val) expr";
    6.50 +  Binop "'val binop" "('adr, 'val) expr" "('adr, 'val) expr"
    6.51  
    6.52  text {*
    6.53   Evaluation (wrt.\ some environment of variable assignments) is
    6.54   defined by primitive recursion over the structure of expressions.
    6.55 -*};
    6.56 +*}
    6.57  
    6.58  consts
    6.59 -  eval :: "('adr, 'val) expr => ('adr => 'val) => 'val";
    6.60 +  eval :: "('adr, 'val) expr => ('adr => 'val) => 'val"
    6.61  
    6.62  primrec
    6.63    "eval (Variable x) env = env x"
    6.64    "eval (Constant c) env = c"
    6.65 -  "eval (Binop f e1 e2) env = f (eval e1 env) (eval e2 env)";
    6.66 +  "eval (Binop f e1 e2) env = f (eval e1 env) (eval e2 env)"
    6.67  
    6.68  
    6.69 -subsection {* Machine *};
    6.70 +subsection {* Machine *}
    6.71  
    6.72  text {*
    6.73   Next we model a simple stack machine, with three instructions.
    6.74 -*};
    6.75 +*}
    6.76  
    6.77  datatype ('adr, 'val) instr =
    6.78    Const 'val |
    6.79    Load 'adr |
    6.80 -  Apply "'val binop";
    6.81 +  Apply "'val binop"
    6.82  
    6.83  text {*
    6.84   Execution of a list of stack machine instructions is easily defined
    6.85   as follows.
    6.86 -*};
    6.87 +*}
    6.88  
    6.89  consts
    6.90    exec :: "(('adr, 'val) instr) list
    6.91 -    => 'val list => ('adr => 'val) => 'val list";
    6.92 +    => 'val list => ('adr => 'val) => 'val list"
    6.93  
    6.94  primrec
    6.95    "exec [] stack env = stack"
    6.96 @@ -82,62 +82,62 @@
    6.97        Const c => exec instrs (c # stack) env
    6.98      | Load x => exec instrs (env x # stack) env
    6.99      | Apply f => exec instrs (f (hd stack) (hd (tl stack))
   6.100 -                   # (tl (tl stack))) env)";
   6.101 +                   # (tl (tl stack))) env)"
   6.102  
   6.103  constdefs
   6.104    execute :: "(('adr, 'val) instr) list => ('adr => 'val) => 'val"
   6.105 -  "execute instrs env == hd (exec instrs [] env)";
   6.106 +  "execute instrs env == hd (exec instrs [] env)"
   6.107  
   6.108  
   6.109 -subsection {* Compiler *};
   6.110 +subsection {* Compiler *}
   6.111  
   6.112  text {*
   6.113   We are ready to define the compilation function of expressions to
   6.114   lists of stack machine instructions.
   6.115 -*};
   6.116 +*}
   6.117  
   6.118  consts
   6.119 -  compile :: "('adr, 'val) expr => (('adr, 'val) instr) list";
   6.120 +  compile :: "('adr, 'val) expr => (('adr, 'val) instr) list"
   6.121  
   6.122  primrec
   6.123    "compile (Variable x) = [Load x]"
   6.124    "compile (Constant c) = [Const c]"
   6.125 -  "compile (Binop f e1 e2) = compile e2 @ compile e1 @ [Apply f]";
   6.126 +  "compile (Binop f e1 e2) = compile e2 @ compile e1 @ [Apply f]"
   6.127  
   6.128  
   6.129  text {*
   6.130   The main result of this development is the correctness theorem for
   6.131   $\idt{compile}$.  We first establish a lemma about $\idt{exec}$ and
   6.132   list append.
   6.133 -*};
   6.134 +*}
   6.135  
   6.136  lemma exec_append:
   6.137    "ALL stack. exec (xs @ ys) stack env =
   6.138 -    exec ys (exec xs stack env) env" (is "?P xs");
   6.139 -proof (induct ?P xs type: list);
   6.140 -  show "?P []"; by simp;
   6.141 +    exec ys (exec xs stack env) env" (is "?P xs")
   6.142 +proof (induct ?P xs type: list)
   6.143 +  show "?P []" by simp
   6.144  
   6.145 -  fix x xs; assume "?P xs";
   6.146 -  show "?P (x # xs)" (is "?Q x");
   6.147 -  proof (induct ?Q x type: instr);
   6.148 -    show "!!val. ?Q (Const val)"; by (simp!);
   6.149 -    show "!!adr. ?Q (Load adr)"; by (simp!);
   6.150 -    show "!!fun. ?Q (Apply fun)"; by (simp!);
   6.151 -  qed;
   6.152 -qed;
   6.153 +  fix x xs assume "?P xs"
   6.154 +  show "?P (x # xs)" (is "?Q x")
   6.155 +  proof (induct ?Q x type: instr)
   6.156 +    show "!!val. ?Q (Const val)" by (simp!)
   6.157 +    show "!!adr. ?Q (Load adr)" by (simp!)
   6.158 +    show "!!fun. ?Q (Apply fun)" by (simp!)
   6.159 +  qed
   6.160 +qed
   6.161  
   6.162 -theorem correctness: "execute (compile e) env = eval e env";
   6.163 -proof -;
   6.164 +theorem correctness: "execute (compile e) env = eval e env"
   6.165 +proof -
   6.166    have "ALL stack. exec (compile e) stack env =
   6.167 -    eval e env # stack" (is "?P e");
   6.168 -  proof (induct ?P e type: expr);
   6.169 -    show "!!adr. ?P (Variable adr)"; by simp;
   6.170 -    show "!!val. ?P (Constant val)"; by simp;
   6.171 -    show "!!fun e1 e2. (?P e1 ==> ?P e2 ==> ?P (Binop fun e1 e2))";
   6.172 -      by (simp add: exec_append);
   6.173 -  qed;
   6.174 -  thus ?thesis; by (simp add: execute_def);
   6.175 -qed;
   6.176 +    eval e env # stack" (is "?P e")
   6.177 +  proof (induct ?P e type: expr)
   6.178 +    show "!!adr. ?P (Variable adr)" by simp
   6.179 +    show "!!val. ?P (Constant val)" by simp
   6.180 +    show "!!fun e1 e2. (?P e1 ==> ?P e2 ==> ?P (Binop fun e1 e2))"
   6.181 +      by (simp add: exec_append)
   6.182 +  qed
   6.183 +  thus ?thesis by (simp add: execute_def)
   6.184 +qed
   6.185  
   6.186  
   6.187  text {*
   6.188 @@ -146,103 +146,103 @@
   6.189   Subsequently, the same reasoning is elaborated in detail --- at most
   6.190   one recursive function definition is used at a time.  Thus we get a
   6.191   better idea of what is actually going on.
   6.192 -*};
   6.193 +*}
   6.194  
   6.195  lemma exec_append:
   6.196    "ALL stack. exec (xs @ ys) stack env
   6.197 -    = exec ys (exec xs stack env) env" (is "?P xs");
   6.198 -proof (induct ?P xs);
   6.199 -  show "?P []" (is "ALL s. ?Q s");
   6.200 -  proof;
   6.201 -    fix s; have "exec ([] @ ys) s env = exec ys s env"; by simp;
   6.202 -    also; have "... = exec ys (exec [] s env) env"; by simp;
   6.203 -    finally; show "?Q s"; .;
   6.204 -  qed;
   6.205 -  fix x xs; assume hyp: "?P xs";
   6.206 -  show "?P (x # xs)";
   6.207 -  proof (induct x);
   6.208 -    fix val;
   6.209 -    show "?P (Const val # xs)" (is "ALL s. ?Q s");
   6.210 -    proof;
   6.211 -      fix s;
   6.212 +    = exec ys (exec xs stack env) env" (is "?P xs")
   6.213 +proof (induct ?P xs)
   6.214 +  show "?P []" (is "ALL s. ?Q s")
   6.215 +  proof
   6.216 +    fix s have "exec ([] @ ys) s env = exec ys s env" by simp
   6.217 +    also have "... = exec ys (exec [] s env) env" by simp
   6.218 +    finally show "?Q s" .
   6.219 +  qed
   6.220 +  fix x xs assume hyp: "?P xs"
   6.221 +  show "?P (x # xs)"
   6.222 +  proof (induct x)
   6.223 +    fix val
   6.224 +    show "?P (Const val # xs)" (is "ALL s. ?Q s")
   6.225 +    proof
   6.226 +      fix s
   6.227        have "exec ((Const val # xs) @ ys) s env =
   6.228 -          exec (Const val # xs @ ys) s env";
   6.229 -        by simp;
   6.230 -      also; have "... = exec (xs @ ys) (val # s) env"; by simp;
   6.231 -      also; from hyp;
   6.232 -        have "... = exec ys (exec xs (val # s) env) env"; ..;
   6.233 -      also; have "... = exec ys (exec (Const val # xs) s env) env";
   6.234 -        by simp;
   6.235 -      finally; show "?Q s"; .;
   6.236 -    qed;
   6.237 -  next;
   6.238 -    fix adr; from hyp; show "?P (Load adr # xs)"; by simp -- {* same as above *};
   6.239 -  next;
   6.240 -    fix fun;
   6.241 -    show "?P (Apply fun # xs)" (is "ALL s. ?Q s");
   6.242 -    proof;
   6.243 -      fix s;
   6.244 +          exec (Const val # xs @ ys) s env"
   6.245 +        by simp
   6.246 +      also have "... = exec (xs @ ys) (val # s) env" by simp
   6.247 +      also from hyp
   6.248 +        have "... = exec ys (exec xs (val # s) env) env" ..
   6.249 +      also have "... = exec ys (exec (Const val # xs) s env) env"
   6.250 +        by simp
   6.251 +      finally show "?Q s" .
   6.252 +    qed
   6.253 +  next
   6.254 +    fix adr from hyp show "?P (Load adr # xs)" by simp -- {* same as above *}
   6.255 +  next
   6.256 +    fix fun
   6.257 +    show "?P (Apply fun # xs)" (is "ALL s. ?Q s")
   6.258 +    proof
   6.259 +      fix s
   6.260        have "exec ((Apply fun # xs) @ ys) s env =
   6.261 -          exec (Apply fun # xs @ ys) s env";
   6.262 -        by simp;
   6.263 -      also; have "... =
   6.264 -          exec (xs @ ys) (fun (hd s) (hd (tl s)) # (tl (tl s))) env";
   6.265 -        by simp;
   6.266 -      also; from hyp; have "... =
   6.267 -        exec ys (exec xs (fun (hd s) (hd (tl s)) # tl (tl s)) env) env";
   6.268 -        ..;
   6.269 -      also; have "... = exec ys (exec (Apply fun # xs) s env) env"; by simp;
   6.270 -      finally; show "?Q s"; .;
   6.271 -    qed;
   6.272 -  qed;
   6.273 -qed;
   6.274 +          exec (Apply fun # xs @ ys) s env"
   6.275 +        by simp
   6.276 +      also have "... =
   6.277 +          exec (xs @ ys) (fun (hd s) (hd (tl s)) # (tl (tl s))) env"
   6.278 +        by simp
   6.279 +      also from hyp have "... =
   6.280 +        exec ys (exec xs (fun (hd s) (hd (tl s)) # tl (tl s)) env) env"
   6.281 +        ..
   6.282 +      also have "... = exec ys (exec (Apply fun # xs) s env) env" by simp
   6.283 +      finally show "?Q s" .
   6.284 +    qed
   6.285 +  qed
   6.286 +qed
   6.287  
   6.288 -theorem correctness: "execute (compile e) env = eval e env";
   6.289 -proof -;
   6.290 +theorem correctness: "execute (compile e) env = eval e env"
   6.291 +proof -
   6.292    have exec_compile:
   6.293      "ALL stack. exec (compile e) stack env = eval e env # stack"
   6.294 -    (is "?P e");
   6.295 -  proof (induct e);
   6.296 -    fix adr; show "?P (Variable adr)" (is "ALL s. ?Q s");
   6.297 -    proof;
   6.298 -      fix s;
   6.299 -      have "exec (compile (Variable adr)) s env = exec [Load adr] s env";
   6.300 -        by simp;
   6.301 -      also; have "... = env adr # s"; by simp;
   6.302 -      also; have "env adr = eval (Variable adr) env"; by simp;
   6.303 -      finally; show "?Q s"; .;
   6.304 -    qed;
   6.305 -  next;
   6.306 -    fix val; show "?P (Constant val)"; by simp -- {* same as above *};
   6.307 -  next;
   6.308 -    fix fun e1 e2; assume hyp1: "?P e1" and hyp2: "?P e2";
   6.309 -    show "?P (Binop fun e1 e2)" (is "ALL s. ?Q s");
   6.310 -    proof;
   6.311 -      fix s; have "exec (compile (Binop fun e1 e2)) s env
   6.312 -        = exec (compile e2 @ compile e1 @ [Apply fun]) s env"; by simp;
   6.313 -      also; have "... = exec [Apply fun]
   6.314 -          (exec (compile e1) (exec (compile e2) s env) env) env";
   6.315 -        by (simp only: exec_append);
   6.316 -      also; from hyp2;
   6.317 -        have "exec (compile e2) s env = eval e2 env # s"; ..;
   6.318 -      also; from hyp1;
   6.319 -        have "exec (compile e1) ... env = eval e1 env # ..."; ..;
   6.320 -      also; have "exec [Apply fun] ... env =
   6.321 -        fun (hd ...) (hd (tl ...)) # (tl (tl ...))"; by simp;
   6.322 -      also; have "... = fun (eval e1 env) (eval e2 env) # s"; by simp;
   6.323 -      also; have "fun (eval e1 env) (eval e2 env) =
   6.324 -          eval (Binop fun e1 e2) env";
   6.325 -        by simp;
   6.326 -      finally; show "?Q s"; .;
   6.327 -    qed;
   6.328 -  qed;
   6.329 +    (is "?P e")
   6.330 +  proof (induct e)
   6.331 +    fix adr show "?P (Variable adr)" (is "ALL s. ?Q s")
   6.332 +    proof
   6.333 +      fix s
   6.334 +      have "exec (compile (Variable adr)) s env = exec [Load adr] s env"
   6.335 +        by simp
   6.336 +      also have "... = env adr # s" by simp
   6.337 +      also have "env adr = eval (Variable adr) env" by simp
   6.338 +      finally show "?Q s" .
   6.339 +    qed
   6.340 +  next
   6.341 +    fix val show "?P (Constant val)" by simp -- {* same as above *}
   6.342 +  next
   6.343 +    fix fun e1 e2 assume hyp1: "?P e1" and hyp2: "?P e2"
   6.344 +    show "?P (Binop fun e1 e2)" (is "ALL s. ?Q s")
   6.345 +    proof
   6.346 +      fix s have "exec (compile (Binop fun e1 e2)) s env
   6.347 +        = exec (compile e2 @ compile e1 @ [Apply fun]) s env" by simp
   6.348 +      also have "... = exec [Apply fun]
   6.349 +          (exec (compile e1) (exec (compile e2) s env) env) env"
   6.350 +        by (simp only: exec_append)
   6.351 +      also from hyp2
   6.352 +        have "exec (compile e2) s env = eval e2 env # s" ..
   6.353 +      also from hyp1
   6.354 +        have "exec (compile e1) ... env = eval e1 env # ..." ..
   6.355 +      also have "exec [Apply fun] ... env =
   6.356 +        fun (hd ...) (hd (tl ...)) # (tl (tl ...))" by simp
   6.357 +      also have "... = fun (eval e1 env) (eval e2 env) # s" by simp
   6.358 +      also have "fun (eval e1 env) (eval e2 env) =
   6.359 +          eval (Binop fun e1 e2) env"
   6.360 +        by simp
   6.361 +      finally show "?Q s" .
   6.362 +    qed
   6.363 +  qed
   6.364  
   6.365 -  have "execute (compile e) env = hd (exec (compile e) [] env)";
   6.366 -    by (simp add: execute_def);
   6.367 -  also; from exec_compile;
   6.368 -    have "exec (compile e) [] env = [eval e env]"; ..;
   6.369 -  also; have "hd ... = eval e env"; by simp;
   6.370 -  finally; show ?thesis; .;
   6.371 -qed;
   6.372 +  have "execute (compile e) env = hd (exec (compile e) [] env)"
   6.373 +    by (simp add: execute_def)
   6.374 +  also from exec_compile
   6.375 +    have "exec (compile e) [] env = [eval e env]" ..
   6.376 +  also have "hd ... = eval e env" by simp
   6.377 +  finally show ?thesis .
   6.378 +qed
   6.379  
   6.380 -end;
   6.381 +end
     7.1 --- a/src/HOL/Isar_examples/Fibonacci.thy	Sun Sep 17 22:15:08 2000 +0200
     7.2 +++ b/src/HOL/Isar_examples/Fibonacci.thy	Sun Sep 17 22:19:02 2000 +0200
     7.3 @@ -13,153 +13,153 @@
     7.4    (Addison-Wesley, 1989)
     7.5  *)
     7.6  
     7.7 -header {* Fib and Gcd commute *};
     7.8 +header {* Fib and Gcd commute *}
     7.9  
    7.10 -theory Fibonacci = Primes:;
    7.11 +theory Fibonacci = Primes:
    7.12  
    7.13  text_raw {*
    7.14   \footnote{Isar version by Gertrud Bauer.  Original tactic script by
    7.15   Larry Paulson.  A few proofs of laws taken from
    7.16   \cite{Concrete-Math}.}
    7.17 -*};
    7.18 +*}
    7.19  
    7.20  
    7.21 -subsection {* Fibonacci numbers *};
    7.22 +subsection {* Fibonacci numbers *}
    7.23  
    7.24 -consts fib :: "nat => nat";
    7.25 +consts fib :: "nat => nat"
    7.26  recdef fib less_than
    7.27   "fib 0 = 0"
    7.28   "fib 1 = 1"
    7.29 - "fib (Suc (Suc x)) = fib x + fib (Suc x)";
    7.30 + "fib (Suc (Suc x)) = fib x + fib (Suc x)"
    7.31  
    7.32 -lemma [simp]: "0 < fib (Suc n)";
    7.33 -  by (induct n rule: fib.induct) (simp+);
    7.34 +lemma [simp]: "0 < fib (Suc n)"
    7.35 +  by (induct n rule: fib.induct) (simp+)
    7.36  
    7.37  
    7.38 -text {* Alternative induction rule. *};
    7.39 +text {* Alternative induction rule. *}
    7.40  
    7.41  theorem fib_induct:
    7.42 -    "P 0 ==> P 1 ==> (!!n. P (n + 1) ==> P n ==> P (n + 2)) ==> P n";
    7.43 -  by (induct rule: fib.induct, simp+);
    7.44 +    "P 0 ==> P 1 ==> (!!n. P (n + 1) ==> P n ==> P (n + 2)) ==> P n"
    7.45 +  by (induct rule: fib.induct, simp+)
    7.46  
    7.47  
    7.48  
    7.49 -subsection {* Fib and gcd commute *};
    7.50 +subsection {* Fib and gcd commute *}
    7.51  
    7.52 -text {* A few laws taken from \cite{Concrete-Math}. *};
    7.53 +text {* A few laws taken from \cite{Concrete-Math}. *}
    7.54  
    7.55  lemma fib_add:
    7.56    "fib (n + k + 1) = fib (k + 1) * fib (n + 1) + fib k * fib n"
    7.57    (is "?P n")
    7.58 -  -- {* see \cite[page 280]{Concrete-Math} *};
    7.59 -proof (induct ?P n rule: fib_induct);
    7.60 -  show "?P 0"; by simp;
    7.61 -  show "?P 1"; by simp;
    7.62 -  fix n;
    7.63 +  -- {* see \cite[page 280]{Concrete-Math} *}
    7.64 +proof (induct ?P n rule: fib_induct)
    7.65 +  show "?P 0" by simp
    7.66 +  show "?P 1" by simp
    7.67 +  fix n
    7.68    have "fib (n + 2 + k + 1)
    7.69 -    = fib (n + k + 1) + fib (n + 1 + k + 1)"; by simp;
    7.70 -  also; assume "fib (n + k + 1)
    7.71 +    = fib (n + k + 1) + fib (n + 1 + k + 1)" by simp
    7.72 +  also assume "fib (n + k + 1)
    7.73      = fib (k + 1) * fib (n + 1) + fib k * fib n"
    7.74 -      (is " _ = ?R1");
    7.75 -  also; assume "fib (n + 1 + k + 1)
    7.76 +      (is " _ = ?R1")
    7.77 +  also assume "fib (n + 1 + k + 1)
    7.78      = fib (k + 1) * fib (n + 1 + 1) + fib k * fib (n + 1)"
    7.79 -      (is " _ = ?R2");
    7.80 -  also; have "?R1 + ?R2
    7.81 -    = fib (k + 1) * fib (n + 2 + 1) + fib k * fib (n + 2)";
    7.82 -    by (simp add: add_mult_distrib2);
    7.83 -  finally; show "?P (n + 2)"; .;
    7.84 -qed;
    7.85 +      (is " _ = ?R2")
    7.86 +  also have "?R1 + ?R2
    7.87 +    = fib (k + 1) * fib (n + 2 + 1) + fib k * fib (n + 2)"
    7.88 +    by (simp add: add_mult_distrib2)
    7.89 +  finally show "?P (n + 2)" .
    7.90 +qed
    7.91  
    7.92 -lemma gcd_fib_Suc_eq_1: "gcd (fib n, fib (n + 1)) = 1" (is "?P n");
    7.93 -proof (induct ?P n rule: fib_induct);
    7.94 -  show "?P 0"; by simp;
    7.95 -  show "?P 1"; by simp;
    7.96 -  fix n;
    7.97 -  have "fib (n + 2 + 1) = fib (n + 1) + fib (n + 2)";
    7.98 -    by simp;
    7.99 -  also; have "gcd (fib (n + 2), ...) = gcd (fib (n + 2), fib (n + 1))";
   7.100 -    by (simp only: gcd_add2');
   7.101 -  also; have "... = gcd (fib (n + 1), fib (n + 1 + 1))";
   7.102 -    by (simp add: gcd_commute);
   7.103 -  also; assume "... = 1";
   7.104 -  finally; show "?P (n + 2)"; .;
   7.105 -qed;
   7.106 +lemma gcd_fib_Suc_eq_1: "gcd (fib n, fib (n + 1)) = 1" (is "?P n")
   7.107 +proof (induct ?P n rule: fib_induct)
   7.108 +  show "?P 0" by simp
   7.109 +  show "?P 1" by simp
   7.110 +  fix n
   7.111 +  have "fib (n + 2 + 1) = fib (n + 1) + fib (n + 2)"
   7.112 +    by simp
   7.113 +  also have "gcd (fib (n + 2), ...) = gcd (fib (n + 2), fib (n + 1))"
   7.114 +    by (simp only: gcd_add2')
   7.115 +  also have "... = gcd (fib (n + 1), fib (n + 1 + 1))"
   7.116 +    by (simp add: gcd_commute)
   7.117 +  also assume "... = 1"
   7.118 +  finally show "?P (n + 2)" .
   7.119 +qed
   7.120  
   7.121 -lemma gcd_mult_add: "0 < n ==> gcd (n * k + m, n) = gcd (m, n)";
   7.122 -proof -;
   7.123 -  assume "0 < n";
   7.124 -  hence "gcd (n * k + m, n) = gcd (n, m mod n)";
   7.125 -    by (simp add: gcd_non_0 add_commute);
   7.126 -  also; have "... = gcd (m, n)"; by (simp! add: gcd_non_0);
   7.127 -  finally; show ?thesis; .;
   7.128 -qed;
   7.129 +lemma gcd_mult_add: "0 < n ==> gcd (n * k + m, n) = gcd (m, n)"
   7.130 +proof -
   7.131 +  assume "0 < n"
   7.132 +  hence "gcd (n * k + m, n) = gcd (n, m mod n)"
   7.133 +    by (simp add: gcd_non_0 add_commute)
   7.134 +  also have "... = gcd (m, n)" by (simp! add: gcd_non_0)
   7.135 +  finally show ?thesis .
   7.136 +qed
   7.137  
   7.138 -lemma gcd_fib_add: "gcd (fib m, fib (n + m)) = gcd (fib m, fib n)";
   7.139 -proof (cases m);
   7.140 -  assume "m = 0";
   7.141 -  thus ?thesis; by simp;
   7.142 -next;
   7.143 -  fix k; assume "m = Suc k";
   7.144 -  hence "gcd (fib m, fib (n + m)) = gcd (fib (n + k + 1), fib (k + 1))";
   7.145 -    by (simp add: gcd_commute);
   7.146 -  also; have "fib (n + k + 1)
   7.147 -    = fib (k + 1) * fib (n + 1) + fib k * fib n";
   7.148 -    by (rule fib_add);
   7.149 -  also; have "gcd (..., fib (k + 1)) = gcd (fib k * fib n, fib (k + 1))";
   7.150 -    by (simp add: gcd_mult_add);
   7.151 -  also; have "... = gcd (fib n, fib (k + 1))";
   7.152 -    by (simp only: gcd_fib_Suc_eq_1 gcd_mult_cancel);
   7.153 -  also; have "... = gcd (fib m, fib n)";
   7.154 -    by (simp! add: gcd_commute);
   7.155 -  finally; show ?thesis; .;
   7.156 -qed;
   7.157 +lemma gcd_fib_add: "gcd (fib m, fib (n + m)) = gcd (fib m, fib n)"
   7.158 +proof (cases m)
   7.159 +  assume "m = 0"
   7.160 +  thus ?thesis by simp
   7.161 +next
   7.162 +  fix k assume "m = Suc k"
   7.163 +  hence "gcd (fib m, fib (n + m)) = gcd (fib (n + k + 1), fib (k + 1))"
   7.164 +    by (simp add: gcd_commute)
   7.165 +  also have "fib (n + k + 1)
   7.166 +    = fib (k + 1) * fib (n + 1) + fib k * fib n"
   7.167 +    by (rule fib_add)
   7.168 +  also have "gcd (..., fib (k + 1)) = gcd (fib k * fib n, fib (k + 1))"
   7.169 +    by (simp add: gcd_mult_add)
   7.170 +  also have "... = gcd (fib n, fib (k + 1))"
   7.171 +    by (simp only: gcd_fib_Suc_eq_1 gcd_mult_cancel)
   7.172 +  also have "... = gcd (fib m, fib n)"
   7.173 +    by (simp! add: gcd_commute)
   7.174 +  finally show ?thesis .
   7.175 +qed
   7.176  
   7.177  lemma gcd_fib_diff:
   7.178 -  "m <= n ==> gcd (fib m, fib (n - m)) = gcd (fib m, fib n)";
   7.179 -proof -;
   7.180 -  assume "m <= n";
   7.181 -  have "gcd (fib m, fib (n - m)) = gcd (fib m, fib (n - m + m))";
   7.182 -    by (simp add: gcd_fib_add);
   7.183 -  also; have "n - m + m = n"; by (simp!);
   7.184 -  finally; show ?thesis; .;
   7.185 -qed;
   7.186 +  "m <= n ==> gcd (fib m, fib (n - m)) = gcd (fib m, fib n)"
   7.187 +proof -
   7.188 +  assume "m <= n"
   7.189 +  have "gcd (fib m, fib (n - m)) = gcd (fib m, fib (n - m + m))"
   7.190 +    by (simp add: gcd_fib_add)
   7.191 +  also have "n - m + m = n" by (simp!)
   7.192 +  finally show ?thesis .
   7.193 +qed
   7.194  
   7.195  lemma gcd_fib_mod:
   7.196 -  "0 < m ==> gcd (fib m, fib (n mod m)) = gcd (fib m, fib n)";
   7.197 -proof (induct n rule: nat_less_induct);
   7.198 -  fix n;
   7.199 +  "0 < m ==> gcd (fib m, fib (n mod m)) = gcd (fib m, fib n)"
   7.200 +proof (induct n rule: nat_less_induct)
   7.201 +  fix n
   7.202    assume m: "0 < m"
   7.203    and hyp: "ALL ma. ma < n
   7.204 -           --> gcd (fib m, fib (ma mod m)) = gcd (fib m, fib ma)";
   7.205 -  have "n mod m = (if n < m then n else (n - m) mod m)";
   7.206 -    by (rule mod_if);
   7.207 -  also; have "gcd (fib m, fib ...) = gcd (fib m, fib n)";
   7.208 -  proof cases;
   7.209 -    assume "n < m"; thus ?thesis; by simp;
   7.210 -  next;
   7.211 -    assume not_lt: "~ n < m"; hence le: "m <= n"; by simp;
   7.212 -    have "n - m < n"; by (simp! add: diff_less);
   7.213 -    with hyp; have "gcd (fib m, fib ((n - m) mod m))
   7.214 -       = gcd (fib m, fib (n - m))"; by simp;
   7.215 -    also; from le; have "... = gcd (fib m, fib n)";
   7.216 -      by (rule gcd_fib_diff);
   7.217 -    finally; have "gcd (fib m, fib ((n - m) mod m)) =
   7.218 -      gcd (fib m, fib n)"; .;
   7.219 -    with not_lt; show ?thesis; by simp;
   7.220 -  qed;
   7.221 -  finally; show "gcd (fib m, fib (n mod m)) = gcd (fib m, fib n)"; .;
   7.222 -qed;
   7.223 +           --> gcd (fib m, fib (ma mod m)) = gcd (fib m, fib ma)"
   7.224 +  have "n mod m = (if n < m then n else (n - m) mod m)"
   7.225 +    by (rule mod_if)
   7.226 +  also have "gcd (fib m, fib ...) = gcd (fib m, fib n)"
   7.227 +  proof cases
   7.228 +    assume "n < m" thus ?thesis by simp
   7.229 +  next
   7.230 +    assume not_lt: "~ n < m" hence le: "m <= n" by simp
   7.231 +    have "n - m < n" by (simp! add: diff_less)
   7.232 +    with hyp have "gcd (fib m, fib ((n - m) mod m))
   7.233 +       = gcd (fib m, fib (n - m))" by simp
   7.234 +    also from le have "... = gcd (fib m, fib n)"
   7.235 +      by (rule gcd_fib_diff)
   7.236 +    finally have "gcd (fib m, fib ((n - m) mod m)) =
   7.237 +      gcd (fib m, fib n)" .
   7.238 +    with not_lt show ?thesis by simp
   7.239 +  qed
   7.240 +  finally show "gcd (fib m, fib (n mod m)) = gcd (fib m, fib n)" .
   7.241 +qed
   7.242  
   7.243  
   7.244 -theorem fib_gcd: "fib (gcd (m, n)) = gcd (fib m, fib n)" (is "?P m n");
   7.245 -proof (induct ?P m n rule: gcd_induct);
   7.246 -  fix m; show "fib (gcd (m, 0)) = gcd (fib m, fib 0)"; by simp;
   7.247 -  fix n :: nat; assume n: "0 < n";
   7.248 -  hence "gcd (m, n) = gcd (n, m mod n)"; by (rule gcd_non_0);
   7.249 -  also; assume hyp: "fib ... = gcd (fib n, fib (m mod n))";
   7.250 -  also; from n; have "... = gcd (fib n, fib m)"; by (rule gcd_fib_mod);
   7.251 -  also; have "... = gcd (fib m, fib n)"; by (rule gcd_commute);
   7.252 -  finally; show "fib (gcd (m, n)) = gcd (fib m, fib n)"; .;
   7.253 -qed;
   7.254 +theorem fib_gcd: "fib (gcd (m, n)) = gcd (fib m, fib n)" (is "?P m n")
   7.255 +proof (induct ?P m n rule: gcd_induct)
   7.256 +  fix m show "fib (gcd (m, 0)) = gcd (fib m, fib 0)" by simp
   7.257 +  fix n :: nat assume n: "0 < n"
   7.258 +  hence "gcd (m, n) = gcd (n, m mod n)" by (rule gcd_non_0)
   7.259 +  also assume hyp: "fib ... = gcd (fib n, fib (m mod n))"
   7.260 +  also from n have "... = gcd (fib n, fib m)" by (rule gcd_fib_mod)
   7.261 +  also have "... = gcd (fib m, fib n)" by (rule gcd_commute)
   7.262 +  finally show "fib (gcd (m, n)) = gcd (fib m, fib n)" .
   7.263 +qed
   7.264  
   7.265 -end;
   7.266 +end
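A concrete instance of fib_gcd, again assuming fib 0 = 0 and fib 1 = 1: gcd (9, 6) = 3 and fib 3 = 2, while gcd (fib 9, fib 6) = gcd (34, 8) = 2, so both sides of the equation agree.  The particular numbers are only an illustration, not taken from the theory sources.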
     8.1 --- a/src/HOL/Isar_examples/Group.thy	Sun Sep 17 22:15:08 2000 +0200
     8.2 +++ b/src/HOL/Isar_examples/Group.thy	Sun Sep 17 22:19:02 2000 +0200
     8.3 @@ -3,73 +3,73 @@
     8.4      Author:     Markus Wenzel, TU Muenchen
     8.5  *)
     8.6  
     8.7 -header {* Basic group theory *};
     8.8 +header {* Basic group theory *}
     8.9  
    8.10 -theory Group = Main:;
    8.11 +theory Group = Main:
    8.12  
    8.13 -subsection {* Groups and calculational reasoning *}; 
    8.14 +subsection {* Groups and calculational reasoning *} 
    8.15  
    8.16  text {*
    8.17   Groups over signature $({\times} :: \alpha \To \alpha \To \alpha,
    8.18   \idt{one} :: \alpha, \idt{inv} :: \alpha \To \alpha)$ are defined as
    8.19   an axiomatic type class as follows.  Note that the parent class
    8.20   $\idt{times}$ is provided by the basic HOL theory.
    8.21 -*};
    8.22 +*}
    8.23  
    8.24  consts
    8.25    one :: "'a"
    8.26 -  inv :: "'a => 'a";
    8.27 +  inv :: "'a => 'a"
    8.28  
    8.29  axclass
    8.30    group < times
    8.31    group_assoc:         "(x * y) * z = x * (y * z)"
    8.32    group_left_unit:     "one * x = x"
    8.33 -  group_left_inverse:  "inv x * x = one";
    8.34 +  group_left_inverse:  "inv x * x = one"
    8.35  
    8.36  text {*
    8.37   The group axioms only state the properties of left unit and inverse;
    8.38   the right versions may be derived as follows.
    8.39 -*};
    8.40 +*}
    8.41  
    8.42 -theorem group_right_inverse: "x * inv x = (one::'a::group)";
    8.43 -proof -;
    8.44 -  have "x * inv x = one * (x * inv x)";
    8.45 -    by (simp only: group_left_unit);
    8.46 -  also; have "... = one * x * inv x";
    8.47 -    by (simp only: group_assoc);
    8.48 -  also; have "... = inv (inv x) * inv x * x * inv x";
    8.49 -    by (simp only: group_left_inverse);
    8.50 -  also; have "... = inv (inv x) * (inv x * x) * inv x";
    8.51 -    by (simp only: group_assoc);
    8.52 -  also; have "... = inv (inv x) * one * inv x";
    8.53 -    by (simp only: group_left_inverse);
    8.54 -  also; have "... = inv (inv x) * (one * inv x)";
    8.55 -    by (simp only: group_assoc);
    8.56 -  also; have "... = inv (inv x) * inv x";
    8.57 -    by (simp only: group_left_unit);
    8.58 -  also; have "... = one";
    8.59 -    by (simp only: group_left_inverse);
    8.60 -  finally; show ?thesis; .;
    8.61 -qed;
    8.62 +theorem group_right_inverse: "x * inv x = (one::'a::group)"
    8.63 +proof -
    8.64 +  have "x * inv x = one * (x * inv x)"
    8.65 +    by (simp only: group_left_unit)
    8.66 +  also have "... = one * x * inv x"
    8.67 +    by (simp only: group_assoc)
    8.68 +  also have "... = inv (inv x) * inv x * x * inv x"
    8.69 +    by (simp only: group_left_inverse)
    8.70 +  also have "... = inv (inv x) * (inv x * x) * inv x"
    8.71 +    by (simp only: group_assoc)
    8.72 +  also have "... = inv (inv x) * one * inv x"
    8.73 +    by (simp only: group_left_inverse)
    8.74 +  also have "... = inv (inv x) * (one * inv x)"
    8.75 +    by (simp only: group_assoc)
    8.76 +  also have "... = inv (inv x) * inv x"
    8.77 +    by (simp only: group_left_unit)
    8.78 +  also have "... = one"
    8.79 +    by (simp only: group_left_inverse)
    8.80 +  finally show ?thesis .
    8.81 +qed
    8.82  
    8.83  text {*
    8.84   With \name{group-right-inverse} already available,
    8.85   \name{group-right-unit}\label{thm:group-right-unit} is now
    8.86   established much easier.
    8.87 -*};
    8.88 +*}
    8.89  
    8.90 -theorem group_right_unit: "x * one = (x::'a::group)";
    8.91 -proof -;
    8.92 -  have "x * one = x * (inv x * x)";
    8.93 -    by (simp only: group_left_inverse);
    8.94 -  also; have "... = x * inv x * x";
    8.95 -    by (simp only: group_assoc);
    8.96 -  also; have "... = one * x";
    8.97 -    by (simp only: group_right_inverse);
    8.98 -  also; have "... = x";
    8.99 -    by (simp only: group_left_unit);
   8.100 -  finally; show ?thesis; .;
   8.101 -qed;
   8.102 +theorem group_right_unit: "x * one = (x::'a::group)"
   8.103 +proof -
   8.104 +  have "x * one = x * (inv x * x)"
   8.105 +    by (simp only: group_left_inverse)
   8.106 +  also have "... = x * inv x * x"
   8.107 +    by (simp only: group_assoc)
   8.108 +  also have "... = one * x"
   8.109 +    by (simp only: group_right_inverse)
   8.110 +  also have "... = x"
   8.111 +    by (simp only: group_left_unit)
   8.112 +  finally show ?thesis .
   8.113 +qed
   8.114  
   8.115  text {*
   8.116   \medskip The calculational proof style above follows typical
   8.117 @@ -93,38 +93,38 @@
   8.118   Expanding the \isakeyword{also} and \isakeyword{finally} derived
   8.119   language elements, calculations may be simulated by hand as
   8.120   demonstrated below.
   8.121 -*};
   8.122 +*}
   8.123  
   8.124 -theorem "x * one = (x::'a::group)";
   8.125 -proof -;
   8.126 -  have "x * one = x * (inv x * x)";
   8.127 -    by (simp only: group_left_inverse);
   8.128 +theorem "x * one = (x::'a::group)"
   8.129 +proof -
   8.130 +  have "x * one = x * (inv x * x)"
   8.131 +    by (simp only: group_left_inverse)
   8.132  
   8.133    note calculation = this
   8.134 -    -- {* first calculational step: init calculation register *};
   8.135 +    -- {* first calculational step: init calculation register *}
   8.136  
   8.137 -  have "... = x * inv x * x";
   8.138 -    by (simp only: group_assoc);
   8.139 +  have "... = x * inv x * x"
   8.140 +    by (simp only: group_assoc)
   8.141  
   8.142    note calculation = trans [OF calculation this]
   8.143 -    -- {* general calculational step: compose with transitivity rule *};
   8.144 +    -- {* general calculational step: compose with transitivity rule *}
   8.145  
   8.146 -  have "... = one * x";
   8.147 -    by (simp only: group_right_inverse);
   8.148 +  have "... = one * x"
   8.149 +    by (simp only: group_right_inverse)
   8.150  
   8.151    note calculation = trans [OF calculation this]
   8.152 -    -- {* general calculational step: compose with transitivity rule *};
   8.153 +    -- {* general calculational step: compose with transitivity rule *}
   8.154  
   8.155 -  have "... = x";
   8.156 -    by (simp only: group_left_unit);
   8.157 +  have "... = x"
   8.158 +    by (simp only: group_left_unit)
   8.159  
   8.160    note calculation = trans [OF calculation this]
   8.161 -    -- {* final calculational step: compose with transitivity rule ... *};
   8.162 +    -- {* final calculational step: compose with transitivity rule ... *}
   8.163    from calculation
   8.164 -    -- {* ... and pick up the final result *};
   8.165 +    -- {* ... and pick up the final result *}
   8.166  
   8.167 -  show ?thesis; .;
   8.168 -qed;
   8.169 +  show ?thesis .
   8.170 +qed
   8.171  
   8.172  text {*
   8.173   Note that this scheme of calculations is not restricted to plain
   8.174 @@ -133,20 +133,20 @@
   8.175   \isacommand{also} and \isacommand{finally}, Isabelle/Isar maintains
   8.176   separate context information of ``transitivity'' rules.  Rule
   8.177   selection takes place automatically by higher-order unification.
   8.178 -*};
   8.179 +*}
   8.180  
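For illustration, the same mechanism handles calculations over an ordering: assuming suitable transitivity rules (such as order_trans) are declared, a minimal hypothetical fragment inside some proof context, with the placeholder justifications still to be filled in, would read

  have "(a::nat) <= b" sorry
  also have "b <= c" sorry
  finally have "a <= c" .

Here finally composes the two steps with an order transitivity rule instead of plain trans.  The Knaster-Tarski proof further below chains <=-steps in exactly this way.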
   8.181  
   8.182 -subsection {* Groups as monoids *};
   8.183 +subsection {* Groups as monoids *}
   8.184  
   8.185  text {*
   8.186   Monoids over signature $({\times} :: \alpha \To \alpha \To \alpha,
   8.187   \idt{one} :: \alpha)$ are defined like this.
   8.188 -*};
   8.189 +*}
   8.190  
   8.191  axclass monoid < times
   8.192    monoid_assoc:       "(x * y) * z = x * (y * z)"
   8.193    monoid_left_unit:   "one * x = x"
   8.194 -  monoid_right_unit:  "x * one = x";
   8.195 +  monoid_right_unit:  "x * one = x"
   8.196  
   8.197  text {*
   8.198   Groups are \emph{not} yet monoids directly from the definition.  For
   8.199 @@ -156,13 +156,13 @@
   8.200   theorem of group theory (see page~\pageref{thm:group-right-unit}), we
   8.201   may still instantiate $\idt{group} \subset \idt{monoid}$ properly as
   8.202   follows.
   8.203 -*};
   8.204 +*}
   8.205  
   8.206 -instance group < monoid;
   8.207 +instance group < monoid
   8.208    by (intro_classes,
   8.209         rule group_assoc,
   8.210         rule group_left_unit,
   8.211 -       rule group_right_unit);
   8.212 +       rule group_right_unit)
   8.213  
   8.214  text {*
   8.215   The \isacommand{instance} command actually is a version of
   8.216 @@ -171,6 +171,6 @@
   8.217   language element may be involved to establish this statement.  When
   8.218   concluding the proof, the result is transformed into the intended
   8.219   type signature extension behind the scenes.
   8.220 -*};
   8.221 +*}
   8.222  
   8.223 -end;
   8.224 +end
     9.1 --- a/src/HOL/Isar_examples/KnasterTarski.thy	Sun Sep 17 22:15:08 2000 +0200
     9.2 +++ b/src/HOL/Isar_examples/KnasterTarski.thy	Sun Sep 17 22:19:02 2000 +0200
     9.3 @@ -5,12 +5,12 @@
     9.4  Typical textbook proof example.
     9.5  *)
     9.6  
     9.7 -header {* Textbook-style reasoning: the Knaster-Tarski Theorem *};
     9.8 +header {* Textbook-style reasoning: the Knaster-Tarski Theorem *}
     9.9  
    9.10 -theory KnasterTarski = Main:;
    9.11 +theory KnasterTarski = Main:
    9.12  
    9.13  
    9.14 -subsection {* Prose version *};
    9.15 +subsection {* Prose version *}
    9.16  
    9.17  text {*
    9.18   According to the textbook \cite[pages 93--94]{davey-priestley}, the
    9.19 @@ -28,10 +28,10 @@
    9.20   complete the proof that $a$ is a fixpoint.  Since $f$ is
    9.21   order-preserving, $f(f(a)) \le f(a)$.  This says $f(a) \in H$, so $a
    9.22   \le f(a)$.
    9.23 -*};
    9.24 +*}
    9.25  
    9.26  
    9.27 -subsection {* Formal versions *};
    9.28 +subsection {* Formal versions *}
    9.29  
    9.30  text {*
    9.31   The Isar proof below closely follows the original presentation.
    9.32 @@ -39,34 +39,34 @@
    9.33   formal Isar language elements.  Just as in many textbook-style proofs,
    9.34   there is a strong bias towards forward proof, and several bends
    9.35   in the course of reasoning.
    9.36 -*};
    9.37 +*}
    9.38  
    9.39 -theorem KnasterTarski: "mono f ==> EX a::'a set. f a = a";
    9.40 -proof;
    9.41 -  let ?H = "{u. f u <= u}";
    9.42 -  let ?a = "Inter ?H";
    9.43 +theorem KnasterTarski: "mono f ==> EX a::'a set. f a = a"
    9.44 +proof
    9.45 +  let ?H = "{u. f u <= u}"
    9.46 +  let ?a = "Inter ?H"
    9.47  
    9.48 -  assume mono: "mono f";
    9.49 -  show "f ?a = ?a";
    9.50 -  proof -;
    9.51 -    {;
    9.52 -      fix x;
    9.53 -      assume H: "x : ?H";
    9.54 -      hence "?a <= x"; by (rule Inter_lower);
    9.55 -      with mono; have "f ?a <= f x"; ..;
    9.56 -      also; from H; have "... <= x"; ..;
    9.57 -      finally; have "f ?a <= x"; .;
    9.58 -    };
    9.59 -    hence ge: "f ?a <= ?a"; by (rule Inter_greatest);
    9.60 -    {;
    9.61 -      also; presume "... <= f ?a";
    9.62 -      finally (order_antisym); show ?thesis; .;
    9.63 -    };
    9.64 -    from mono ge; have "f (f ?a) <= f ?a"; ..;
    9.65 -    hence "f ?a : ?H"; ..;
    9.66 -    thus "?a <= f ?a"; by (rule Inter_lower);
    9.67 -  qed;
    9.68 -qed;
    9.69 +  assume mono: "mono f"
    9.70 +  show "f ?a = ?a"
    9.71 +  proof -
    9.72 +    {
    9.73 +      fix x
    9.74 +      assume H: "x : ?H"
    9.75 +      hence "?a <= x" by (rule Inter_lower)
    9.76 +      with mono have "f ?a <= f x" ..
    9.77 +      also from H have "... <= x" ..
    9.78 +      finally have "f ?a <= x" .
    9.79 +    }
    9.80 +    hence ge: "f ?a <= ?a" by (rule Inter_greatest)
    9.81 +    {
    9.82 +      also presume "... <= f ?a"
    9.83 +      finally (order_antisym) show ?thesis .
    9.84 +    }
    9.85 +    from mono ge have "f (f ?a) <= f ?a" ..
    9.86 +    hence "f ?a : ?H" ..
    9.87 +    thus "?a <= f ?a" by (rule Inter_lower)
    9.88 +  qed
    9.89 +qed
    9.90  
    9.91  text {*
    9.92   Above we have used several advanced Isar language elements, such as
    9.93 @@ -78,31 +78,31 @@
    9.94   level, while only the inner steps of reasoning are done in a forward
    9.95   manner.  We are certainly more at ease here, requiring only the most
    9.96   basic features of the Isar language.
    9.97 -*};
    9.98 +*}
    9.99  
   9.100 -theorem KnasterTarski': "mono f ==> EX a::'a set. f a = a";
   9.101 -proof;
   9.102 -  let ?H = "{u. f u <= u}";
   9.103 -  let ?a = "Inter ?H";
   9.104 +theorem KnasterTarski': "mono f ==> EX a::'a set. f a = a"
   9.105 +proof
   9.106 +  let ?H = "{u. f u <= u}"
   9.107 +  let ?a = "Inter ?H"
   9.108  
   9.109 -  assume mono: "mono f";
   9.110 -  show "f ?a = ?a";
   9.111 -  proof (rule order_antisym);
   9.112 -    show ge: "f ?a <= ?a";
   9.113 -    proof (rule Inter_greatest);
   9.114 -      fix x;
   9.115 -      assume H: "x : ?H";
   9.116 -      hence "?a <= x"; by (rule Inter_lower);
   9.117 -      with mono; have "f ?a <= f x"; ..;
   9.118 -      also; from H; have "... <= x"; ..;
   9.119 -      finally; show "f ?a <= x"; .;
   9.120 -    qed;
   9.121 -    show "?a <= f ?a";
   9.122 -    proof (rule Inter_lower);
   9.123 -      from mono ge; have "f (f ?a) <= f ?a"; ..;
   9.124 -      thus "f ?a : ?H"; ..;
   9.125 -    qed;
   9.126 -  qed;
   9.127 -qed;
   9.128 +  assume mono: "mono f"
   9.129 +  show "f ?a = ?a"
   9.130 +  proof (rule order_antisym)
   9.131 +    show ge: "f ?a <= ?a"
   9.132 +    proof (rule Inter_greatest)
   9.133 +      fix x
   9.134 +      assume H: "x : ?H"
   9.135 +      hence "?a <= x" by (rule Inter_lower)
   9.136 +      with mono have "f ?a <= f x" ..
   9.137 +      also from H have "... <= x" ..
   9.138 +      finally show "f ?a <= x" .
   9.139 +    qed
   9.140 +    show "?a <= f ?a"
   9.141 +    proof (rule Inter_lower)
   9.142 +      from mono ge have "f (f ?a) <= f ?a" ..
   9.143 +      thus "f ?a : ?H" ..
   9.144 +    qed
   9.145 +  qed
   9.146 +qed
   9.147  
   9.148 -end;
   9.149 +end
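A concrete instance of the theorem, chosen only for illustration: take f X = X Un {0} on sets of naturals, which is monotone.  Then ?H = {u. f u <= u} consists exactly of the sets containing 0, so ?a = Inter ?H = {0}, and indeed f {0} = {0}, matching the witness constructed in both proofs above.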
    10.1 --- a/src/HOL/Isar_examples/MultisetOrder.thy	Sun Sep 17 22:15:08 2000 +0200
    10.2 +++ b/src/HOL/Isar_examples/MultisetOrder.thy	Sun Sep 17 22:19:02 2000 +0200
    10.3 @@ -5,146 +5,146 @@
    10.4  Wellfoundedness proof for the multiset order.
    10.5  *)
    10.6  
    10.7 -header {* Wellfoundedness of multiset ordering *};
    10.8 +header {* Wellfoundedness of multiset ordering *}
    10.9  
   10.10 -theory MultisetOrder = Multiset:;
   10.11 +theory MultisetOrder = Multiset:
   10.12  
   10.13  text_raw {*
   10.14   \footnote{Original tactic script by Tobias Nipkow (see
   10.15   \url{http://isabelle.in.tum.de/library/HOL/Induct/Multiset.html}),
   10.16   based on a pen-and-paper proof due to Wilfried Buchholz.}\isanewline
   10.17 -*};
   10.18 +*}
   10.19  
   10.20  (* FIXME move? *)
   10.21 -theorems [induct type: multiset] = multiset_induct;
   10.22 -theorems [induct set: wf] = wf_induct;
   10.23 -theorems [induct set: acc] = acc_induct;
   10.24 +theorems [induct type: multiset] = multiset_induct
   10.25 +theorems [induct set: wf] = wf_induct
   10.26 +theorems [induct set: acc] = acc_induct
   10.27  
   10.28  
   10.29 -subsection {* A technical lemma *};
   10.30 +subsection {* A technical lemma *}
   10.31  
   10.32  lemma less_add: "(N, M0 + {#a#}) : mult1 r ==>
   10.33      (EX M. (M, M0) : mult1 r & N = M + {#a#}) |
   10.34      (EX K. (ALL b. b :# K --> (b, a) : r) & N = M0 + K)"
   10.35 -  (concl is "?case1 (mult1 r) | ?case2");
   10.36 -proof (unfold mult1_def);
   10.37 -  let ?r = "\<lambda>K a. ALL b. b :# K --> (b, a) : r";
   10.38 -  let ?R = "\<lambda>N M. EX a M0 K. M = M0 + {#a#} & N = M0 + K & ?r K a";
   10.39 -  let ?case1 = "?case1 {(N, M). ?R N M}";
   10.40 +  (concl is "?case1 (mult1 r) | ?case2")
   10.41 +proof (unfold mult1_def)
   10.42 +  let ?r = "\<lambda>K a. ALL b. b :# K --> (b, a) : r"
   10.43 +  let ?R = "\<lambda>N M. EX a M0 K. M = M0 + {#a#} & N = M0 + K & ?r K a"
   10.44 +  let ?case1 = "?case1 {(N, M). ?R N M}"
   10.45  
   10.46 -  assume "(N, M0 + {#a#}) : {(N, M). ?R N M}";
   10.47 +  assume "(N, M0 + {#a#}) : {(N, M). ?R N M}"
   10.48    hence "EX a' M0' K.
   10.49 -      M0 + {#a#} = M0' + {#a'#} & N = M0' + K & ?r K a'"; by simp;
   10.50 -  thus "?case1 | ?case2";
   10.51 -  proof (elim exE conjE);
   10.52 -    fix a' M0' K;
   10.53 -    assume N: "N = M0' + K" and r: "?r K a'";
   10.54 -    assume "M0 + {#a#} = M0' + {#a'#}";
   10.55 +      M0 + {#a#} = M0' + {#a'#} & N = M0' + K & ?r K a'" by simp
   10.56 +  thus "?case1 | ?case2"
   10.57 +  proof (elim exE conjE)
   10.58 +    fix a' M0' K
   10.59 +    assume N: "N = M0' + K" and r: "?r K a'"
   10.60 +    assume "M0 + {#a#} = M0' + {#a'#}"
   10.61      hence "M0 = M0' & a = a' |
   10.62 -        (EX K'. M0 = K' + {#a'#} & M0' = K' + {#a#})";
   10.63 -      by (simp only: add_eq_conv_ex);
   10.64 -    thus ?thesis;
   10.65 -    proof (elim disjE conjE exE);
   10.66 -      assume "M0 = M0'" "a = a'";
   10.67 -      with N r; have "?r K a & N = M0 + K"; by simp;
   10.68 -      hence ?case2; ..; thus ?thesis; ..;
   10.69 -    next;
   10.70 -      fix K';
   10.71 -      assume "M0' = K' + {#a#}";
   10.72 -      with N; have n: "N = K' + K + {#a#}"; by (simp add: union_ac);
   10.73 +        (EX K'. M0 = K' + {#a'#} & M0' = K' + {#a#})"
   10.74 +      by (simp only: add_eq_conv_ex)
   10.75 +    thus ?thesis
   10.76 +    proof (elim disjE conjE exE)
   10.77 +      assume "M0 = M0'" "a = a'"
   10.78 +      with N r have "?r K a & N = M0 + K" by simp
   10.79 +      hence ?case2 .. thus ?thesis ..
   10.80 +    next
   10.81 +      fix K'
   10.82 +      assume "M0' = K' + {#a#}"
   10.83 +      with N have n: "N = K' + K + {#a#}" by (simp add: union_ac)
   10.84  
   10.85 -      assume "M0 = K' + {#a'#}";
   10.86 -      with r; have "?R (K' + K) M0"; by blast;
   10.87 -      with n; have ?case1; by simp; thus ?thesis; ..;
   10.88 -    qed;
   10.89 -  qed;
   10.90 -qed;
   10.91 +      assume "M0 = K' + {#a'#}"
   10.92 +      with r have "?R (K' + K) M0" by blast
   10.93 +      with n have ?case1 by simp thus ?thesis ..
   10.94 +    qed
   10.95 +  qed
   10.96 +qed
   10.97  
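A concrete reading of the lemma, with numbers chosen purely for illustration: take r to be < on the naturals, M0 = {#} and a = 3, so M0 + {#a#} = {#3#}.  The step from {#3#} down to N = {#2, 2, 1#} is in mult1 (<) with K = {#2, 2, 1#}, since every element of K is smaller than 3; less_add then classifies it under ?case2, the case where a itself is replaced by smaller elements, while ?case1 covers steps that happen inside M0 and leave the distinguished copy of a untouched.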
   10.98  
   10.99 -subsection {* The key property *};
  10.100 +subsection {* The key property *}
  10.101  
  10.102 -lemma all_accessible: "wf r ==> ALL M. M : acc (mult1 r)";
  10.103 -proof;
  10.104 -  let ?R = "mult1 r";
  10.105 -  let ?W = "acc ?R";
  10.106 -  {;
  10.107 -    fix M M0 a;
  10.108 +lemma all_accessible: "wf r ==> ALL M. M : acc (mult1 r)"
  10.109 +proof
  10.110 +  let ?R = "mult1 r"
  10.111 +  let ?W = "acc ?R"
  10.112 +  {
  10.113 +    fix M M0 a
  10.114      assume M0: "M0 : ?W"
  10.115        and wf_hyp: "ALL b. (b, a) : r --> (ALL M:?W. M + {#b#} : ?W)"
  10.116 -      and acc_hyp: "ALL M. (M, M0) : ?R --> M + {#a#} : ?W";
  10.117 -    have "M0 + {#a#} : ?W";
  10.118 -    proof (rule accI [of "M0 + {#a#}"]);
  10.119 -      fix N;
  10.120 -      assume "(N, M0 + {#a#}) : ?R";
  10.121 +      and acc_hyp: "ALL M. (M, M0) : ?R --> M + {#a#} : ?W"
  10.122 +    have "M0 + {#a#} : ?W"
  10.123 +    proof (rule accI [of "M0 + {#a#}"])
  10.124 +      fix N
  10.125 +      assume "(N, M0 + {#a#}) : ?R"
  10.126        hence "((EX M. (M, M0) : ?R & N = M + {#a#}) |
  10.127 -          (EX K. (ALL b. b :# K --> (b, a) : r) & N = M0 + K))";
  10.128 -	by (rule less_add);
  10.129 -      thus "N : ?W";
  10.130 -      proof (elim exE disjE conjE);
  10.131 -	fix M; assume "(M, M0) : ?R" and N: "N = M + {#a#}";
  10.132 -	from acc_hyp; have "(M, M0) : ?R --> M + {#a#} : ?W"; ..;
  10.133 -	hence "M + {#a#} : ?W"; ..;
  10.134 -	thus "N : ?W"; by (simp only: N);
  10.135 -      next;
  10.136 -	fix K;
  10.137 -	assume N: "N = M0 + K";
  10.138 -	assume "ALL b. b :# K --> (b, a) : r";
  10.139 -	have "?this --> M0 + K : ?W" (is "?P K");
  10.140 -	proof (induct K);
  10.141 -	  from M0; have "M0 + {#} : ?W"; by simp;
  10.142 -	  thus "?P {#}"; ..;
  10.143 +          (EX K. (ALL b. b :# K --> (b, a) : r) & N = M0 + K))"
  10.144 +	by (rule less_add)
  10.145 +      thus "N : ?W"
  10.146 +      proof (elim exE disjE conjE)
  10.147 +	fix M assume "(M, M0) : ?R" and N: "N = M + {#a#}"
  10.148 +	from acc_hyp have "(M, M0) : ?R --> M + {#a#} : ?W" ..
  10.149 +	hence "M + {#a#} : ?W" ..
  10.150 +	thus "N : ?W" by (simp only: N)
  10.151 +      next
  10.152 +	fix K
  10.153 +	assume N: "N = M0 + K"
  10.154 +	assume "ALL b. b :# K --> (b, a) : r"
  10.155 +	have "?this --> M0 + K : ?W" (is "?P K")
  10.156 +	proof (induct K)
  10.157 +	  from M0 have "M0 + {#} : ?W" by simp
  10.158 +	  thus "?P {#}" ..
  10.159  
  10.160 -	  fix K x; assume hyp: "?P K";
  10.161 -	  show "?P (K + {#x#})";
  10.162 -	  proof;
  10.163 -	    assume a: "ALL b. b :# (K + {#x#}) --> (b, a) : r";
  10.164 -	    hence "(x, a) : r"; by simp;
  10.165 -	    with wf_hyp; have b: "ALL M:?W. M + {#x#} : ?W"; by blast;
  10.166 +	  fix K x assume hyp: "?P K"
  10.167 +	  show "?P (K + {#x#})"
  10.168 +	  proof
  10.169 +	    assume a: "ALL b. b :# (K + {#x#}) --> (b, a) : r"
  10.170 +	    hence "(x, a) : r" by simp
  10.171 +	    with wf_hyp have b: "ALL M:?W. M + {#x#} : ?W" by blast
  10.172  
  10.173 -	    from a hyp; have "M0 + K : ?W"; by simp;
  10.174 -	    with b; have "(M0 + K) + {#x#} : ?W"; ..;
  10.175 -	    thus "M0 + (K + {#x#}) : ?W"; by (simp only: union_assoc);
  10.176 -	  qed;
  10.177 -	qed;
  10.178 -	hence "M0 + K : ?W"; ..;
  10.179 -	thus "N : ?W"; by (simp only: N);
  10.180 -      qed;
  10.181 -    qed;
  10.182 -  }; note tedious_reasoning = this;
  10.183 +	    from a hyp have "M0 + K : ?W" by simp
  10.184 +	    with b have "(M0 + K) + {#x#} : ?W" ..
  10.185 +	    thus "M0 + (K + {#x#}) : ?W" by (simp only: union_assoc)
  10.186 +	  qed
  10.187 +	qed
  10.188 +	hence "M0 + K : ?W" ..
  10.189 +	thus "N : ?W" by (simp only: N)
  10.190 +      qed
  10.191 +    qed
  10.192 +  } note tedious_reasoning = this
  10.193  
  10.194 -  assume wf: "wf r";
  10.195 -  fix M;
  10.196 -  show "M : ?W";
  10.197 -  proof (induct M);
  10.198 -    show "{#} : ?W";
  10.199 -    proof (rule accI);
  10.200 -      fix b; assume "(b, {#}) : ?R";
  10.201 -      with not_less_empty; show "b : ?W"; by contradiction;
  10.202 -    qed;
  10.203 +  assume wf: "wf r"
  10.204 +  fix M
  10.205 +  show "M : ?W"
  10.206 +  proof (induct M)
  10.207 +    show "{#} : ?W"
  10.208 +    proof (rule accI)
  10.209 +      fix b assume "(b, {#}) : ?R"
  10.210 +      with not_less_empty show "b : ?W" by contradiction
  10.211 +    qed
  10.212  
  10.213 -    fix M a; assume "M : ?W";
  10.214 -    from wf; have "ALL M:?W. M + {#a#} : ?W";
  10.215 -    proof induct;
  10.216 -      fix a;
  10.217 -      assume "ALL b. (b, a) : r --> (ALL M:?W. M + {#b#} : ?W)";
  10.218 -      show "ALL M:?W. M + {#a#} : ?W";
  10.219 -      proof;
  10.220 -	fix M; assume "M : ?W";
  10.221 -	thus "M + {#a#} : ?W";
  10.222 -          by (rule acc_induct) (rule tedious_reasoning);
  10.223 -      qed;
  10.224 -    qed;
  10.225 -    thus "M + {#a#} : ?W"; ..;
  10.226 -  qed;
  10.227 -qed;
  10.228 +    fix M a assume "M : ?W"
  10.229 +    from wf have "ALL M:?W. M + {#a#} : ?W"
  10.230 +    proof induct
  10.231 +      fix a
  10.232 +      assume "ALL b. (b, a) : r --> (ALL M:?W. M + {#b#} : ?W)"
  10.233 +      show "ALL M:?W. M + {#a#} : ?W"
  10.234 +      proof
  10.235 +	fix M assume "M : ?W"
  10.236 +	thus "M + {#a#} : ?W"
  10.237 +          by (rule acc_induct) (rule tedious_reasoning)
  10.238 +      qed
  10.239 +    qed
  10.240 +    thus "M + {#a#} : ?W" ..
  10.241 +  qed
  10.242 +qed
  10.243  
  10.244  
  10.245 -subsection {* Main result *};
  10.246 +subsection {* Main result *}
  10.247  
  10.248 -theorem wf_mult1: "wf r ==> wf (mult1 r)";
  10.249 -  by (rule acc_wfI, rule all_accessible);
  10.250 +theorem wf_mult1: "wf r ==> wf (mult1 r)"
  10.251 +  by (rule acc_wfI, rule all_accessible)
  10.252  
  10.253 -theorem wf_mult: "wf r ==> wf (mult r)";
  10.254 -  by (unfold mult_def, rule wf_trancl, rule wf_mult1);
  10.255 +theorem wf_mult: "wf r ==> wf (mult r)"
  10.256 +  by (unfold mult_def, rule wf_trancl, rule wf_mult1)
  10.257  
  10.258 -end;
  10.259 +end
    11.1 --- a/src/HOL/Isar_examples/MutilatedCheckerboard.thy	Sun Sep 17 22:15:08 2000 +0200
    11.2 +++ b/src/HOL/Isar_examples/MutilatedCheckerboard.thy	Sun Sep 17 22:19:02 2000 +0200
    11.3 @@ -4,296 +4,296 @@
    11.4                  Lawrence C Paulson, Cambridge University Computer Laboratory (original scripts)
    11.5  *)
    11.6  
    11.7 -header {* The Mutilated Checker Board Problem *};
    11.8 +header {* The Mutilated Checker Board Problem *}
    11.9  
   11.10 -theory MutilatedCheckerboard = Main:;
   11.11 +theory MutilatedCheckerboard = Main:
   11.12  
   11.13  text {*
   11.14   The Mutilated Checker Board Problem, formalized inductively.  See
   11.15   \cite{paulson-mutilated-board} and
   11.16   \url{http://isabelle.in.tum.de/library/HOL/Induct/Mutil.html} for the
   11.17   original tactic script version.
   11.18 -*};
   11.19 +*}
   11.20  
   11.21 -subsection {* Tilings *};
   11.22 +subsection {* Tilings *}
   11.23  
   11.24  consts
   11.25 -  tiling :: "'a set set => 'a set set";
   11.26 +  tiling :: "'a set set => 'a set set"
   11.27  
   11.28  inductive "tiling A"
   11.29    intros
   11.30      empty: "{} : tiling A"
   11.31      Un:    "a : A ==> t : tiling A ==> a <= - t
   11.32 -              ==> a Un t : tiling A";
   11.33 +              ==> a Un t : tiling A"
   11.34  
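The side condition a <= - t just says that the new piece a is disjoint from the tiling t built so far.  For instance, starting from the empty tiling, one application of Un may add {(0, 0), (0, 1)} and a second one the disjoint piece {(1, 0), (1, 1)}, tiling the 2 x 2 block, provided both pieces belong to A, as the horizontal dominoes introduced below do.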
   11.35  
   11.36 -text "The union of two disjoint tilings is a tiling.";
   11.37 +text "The union of two disjoint tilings is a tiling."
   11.38  
   11.39  lemma tiling_Un:
   11.40    "t : tiling A --> u : tiling A --> t Int u = {}
   11.41 -    --> t Un u : tiling A";
   11.42 -proof;
   11.43 -  assume "t : tiling A" (is "_ : ?T");
   11.44 -  thus "u : ?T --> t Int u = {} --> t Un u : ?T" (is "?P t");
   11.45 -  proof (induct (stripped) t);
   11.46 +    --> t Un u : tiling A"
   11.47 +proof
   11.48 +  assume "t : tiling A" (is "_ : ?T")
   11.49 +  thus "u : ?T --> t Int u = {} --> t Un u : ?T" (is "?P t")
   11.50 +  proof (induct (stripped) t)
   11.51      assume "u : ?T" "{} Int u = {}"
   11.52 -    thus "{} Un u : ?T" by simp;
   11.53 +    thus "{} Un u : ?T" by simp
   11.54    next
   11.55 -    fix a t;
   11.56 -    assume "a : A" "t : ?T" "?P t" "a <= - t";
   11.57 -    assume "u : ?T" "(a Un t) Int u = {}";
   11.58 -    have hyp: "t Un u: ?T"; by (blast!);
   11.59 -    have "a <= - (t Un u)"; by (blast!);
   11.60 -    with _ hyp; have "a Un (t Un u) : ?T"; by (rule tiling.Un);
   11.61 -    also; have "a Un (t Un u) = (a Un t) Un u";
   11.62 -      by (simp only: Un_assoc);
   11.63 -    finally; show "... : ?T"; .;
   11.64 -  qed;
   11.65 -qed;
   11.66 +    fix a t
   11.67 +    assume "a : A" "t : ?T" "?P t" "a <= - t"
   11.68 +    assume "u : ?T" "(a Un t) Int u = {}"
   11.69 +    have hyp: "t Un u: ?T" by (blast!)
   11.70 +    have "a <= - (t Un u)" by (blast!)
   11.71 +    with _ hyp have "a Un (t Un u) : ?T" by (rule tiling.Un)
   11.72 +    also have "a Un (t Un u) = (a Un t) Un u"
   11.73 +      by (simp only: Un_assoc)
   11.74 +    finally show "... : ?T" .
   11.75 +  qed
   11.76 +qed
   11.77  
   11.78  
   11.79 -subsection {* Basic properties of ``below'' *};
   11.80 +subsection {* Basic properties of ``below'' *}
   11.81  
   11.82  constdefs
   11.83    below :: "nat => nat set"
   11.84 -  "below n == {i. i < n}";
   11.85 +  "below n == {i. i < n}"
   11.86  
   11.87 -lemma below_less_iff [iff]: "(i: below k) = (i < k)";
   11.88 -  by (simp add: below_def);
   11.89 +lemma below_less_iff [iff]: "(i: below k) = (i < k)"
   11.90 +  by (simp add: below_def)
   11.91  
   11.92 -lemma below_0: "below 0 = {}";
   11.93 -  by (simp add: below_def);
   11.94 +lemma below_0: "below 0 = {}"
   11.95 +  by (simp add: below_def)
   11.96  
   11.97  lemma Sigma_Suc1:
   11.98 -    "m = n + 1 ==> below m <*> B = ({n} <*> B) Un (below n <*> B)";
   11.99 -  by (simp add: below_def less_Suc_eq) blast;
  11.100 +    "m = n + 1 ==> below m <*> B = ({n} <*> B) Un (below n <*> B)"
  11.101 +  by (simp add: below_def less_Suc_eq) blast
  11.102  
  11.103  lemma Sigma_Suc2:
  11.104      "m = n + 2 ==> A <*> below m =
  11.105 -      (A <*> {n}) Un (A <*> {n + 1}) Un (A <*> below n)";
  11.106 -  by (auto simp add: below_def) arith;
  11.107 +      (A <*> {n}) Un (A <*> {n + 1}) Un (A <*> below n)"
  11.108 +  by (auto simp add: below_def) arith
  11.109  
  11.110 -lemmas Sigma_Suc = Sigma_Suc1 Sigma_Suc2;
  11.111 +lemmas Sigma_Suc = Sigma_Suc1 Sigma_Suc2
  11.112  
  11.113  
  11.114 -subsection {* Basic properties of ``evnodd'' *};
  11.115 +subsection {* Basic properties of ``evnodd'' *}
  11.116  
  11.117  constdefs
  11.118    evnodd :: "(nat * nat) set => nat => (nat * nat) set"
  11.119 -  "evnodd A b == A Int {(i, j). (i + j) mod #2 = b}";
  11.120 +  "evnodd A b == A Int {(i, j). (i + j) mod #2 = b}"
  11.121  
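In words, evnodd A b selects the squares of A whose colour under the usual checkerboard colouring is b.  For example, evnodd {(0, 0), (0, 1), (1, 1)} 0 = {(0, 0), (1, 1)} and evnodd {(0, 0), (0, 1), (1, 1)} 1 = {(0, 1)}, since 0 + 0 and 1 + 1 are even while 0 + 1 is odd.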
  11.122  lemma evnodd_iff:
  11.123 -    "(i, j): evnodd A b = ((i, j): A  & (i + j) mod #2 = b)";
  11.124 -  by (simp add: evnodd_def);
  11.125 +    "(i, j): evnodd A b = ((i, j): A  & (i + j) mod #2 = b)"
  11.126 +  by (simp add: evnodd_def)
  11.127  
  11.128 -lemma evnodd_subset: "evnodd A b <= A";
  11.129 -  by (unfold evnodd_def, rule Int_lower1);
  11.130 +lemma evnodd_subset: "evnodd A b <= A"
  11.131 +  by (unfold evnodd_def, rule Int_lower1)
  11.132  
  11.133 -lemma evnoddD: "x : evnodd A b ==> x : A";
  11.134 -  by (rule subsetD, rule evnodd_subset);
  11.135 +lemma evnoddD: "x : evnodd A b ==> x : A"
  11.136 +  by (rule subsetD, rule evnodd_subset)
  11.137  
  11.138 -lemma evnodd_finite: "finite A ==> finite (evnodd A b)";
  11.139 -  by (rule finite_subset, rule evnodd_subset);
  11.140 +lemma evnodd_finite: "finite A ==> finite (evnodd A b)"
  11.141 +  by (rule finite_subset, rule evnodd_subset)
  11.142  
  11.143 -lemma evnodd_Un: "evnodd (A Un B) b = evnodd A b Un evnodd B b";
  11.144 -  by (unfold evnodd_def) blast;
  11.145 +lemma evnodd_Un: "evnodd (A Un B) b = evnodd A b Un evnodd B b"
  11.146 +  by (unfold evnodd_def) blast
  11.147  
  11.148 -lemma evnodd_Diff: "evnodd (A - B) b = evnodd A b - evnodd B b";
  11.149 -  by (unfold evnodd_def) blast;
  11.150 +lemma evnodd_Diff: "evnodd (A - B) b = evnodd A b - evnodd B b"
  11.151 +  by (unfold evnodd_def) blast
  11.152  
  11.153 -lemma evnodd_empty: "evnodd {} b = {}";
  11.154 -  by (simp add: evnodd_def);
  11.155 +lemma evnodd_empty: "evnodd {} b = {}"
  11.156 +  by (simp add: evnodd_def)
  11.157  
  11.158  lemma evnodd_insert: "evnodd (insert (i, j) C) b =
  11.159      (if (i + j) mod #2 = b
  11.160 -      then insert (i, j) (evnodd C b) else evnodd C b)";
  11.161 -  by (simp add: evnodd_def) blast;
  11.162 +      then insert (i, j) (evnodd C b) else evnodd C b)"
  11.163 +  by (simp add: evnodd_def) blast
  11.164  
  11.165  
  11.166 -subsection {* Dominoes *};
  11.167 +subsection {* Dominoes *}
  11.168  
  11.169  consts 
  11.170 -  domino :: "(nat * nat) set set";
  11.171 +  domino :: "(nat * nat) set set"
  11.172  
  11.173  inductive domino
  11.174    intros
  11.175      horiz:  "{(i, j), (i, j + 1)} : domino"
  11.176 -    vertl:  "{(i, j), (i + 1, j)} : domino";
  11.177 +    vertl:  "{(i, j), (i + 1, j)} : domino"
  11.178  
  11.179  lemma dominoes_tile_row:
  11.180    "{i} <*> below (2 * n) : tiling domino"
  11.181 -  (is "?P n" is "?B n : ?T");
  11.182 -proof (induct n);
  11.183 -  show "?P 0"; by (simp add: below_0 tiling.empty);
  11.184 +  (is "?P n" is "?B n : ?T")
  11.185 +proof (induct n)
  11.186 +  show "?P 0" by (simp add: below_0 tiling.empty)
  11.187  
  11.188 -  fix n; assume hyp: "?P n";
  11.189 -  let ?a = "{i} <*> {2 * n + 1} Un {i} <*> {2 * n}";
  11.190 +  fix n assume hyp: "?P n"
  11.191 +  let ?a = "{i} <*> {2 * n + 1} Un {i} <*> {2 * n}"
  11.192  
  11.193 -  have "?B (Suc n) = ?a Un ?B n";
  11.194 -    by (auto simp add: Sigma_Suc Un_assoc);
  11.195 -  also; have "... : ?T";
  11.196 -  proof (rule tiling.Un);
  11.197 -    have "{(i, 2 * n), (i, 2 * n + 1)} : domino";
  11.198 -      by (rule domino.horiz);
  11.199 -    also; have "{(i, 2 * n), (i, 2 * n + 1)} = ?a"; by blast;
  11.200 -    finally; show "... : domino"; .;
  11.201 -    from hyp; show "?B n : ?T"; .;
  11.202 -    show "?a <= - ?B n"; by blast;
  11.203 -  qed;
  11.204 -  finally; show "?P (Suc n)"; .;
  11.205 -qed;
  11.206 +  have "?B (Suc n) = ?a Un ?B n"
  11.207 +    by (auto simp add: Sigma_Suc Un_assoc)
  11.208 +  also have "... : ?T"
  11.209 +  proof (rule tiling.Un)
  11.210 +    have "{(i, 2 * n), (i, 2 * n + 1)} : domino"
  11.211 +      by (rule domino.horiz)
  11.212 +    also have "{(i, 2 * n), (i, 2 * n + 1)} = ?a" by blast
  11.213 +    finally show "... : domino" .
  11.214 +    from hyp show "?B n : ?T" .
  11.215 +    show "?a <= - ?B n" by blast
  11.216 +  qed
  11.217 +  finally show "?P (Suc n)" .
  11.218 +qed
  11.219  
  11.220  lemma dominoes_tile_matrix:
  11.221    "below m <*> below (2 * n) : tiling domino"
  11.222 -  (is "?P m" is "?B m : ?T");
  11.223 -proof (induct m);
  11.224 -  show "?P 0"; by (simp add: below_0 tiling.empty);
  11.225 +  (is "?P m" is "?B m : ?T")
  11.226 +proof (induct m)
  11.227 +  show "?P 0" by (simp add: below_0 tiling.empty)
  11.228  
  11.229 -  fix m; assume hyp: "?P m";
  11.230 -  let ?t = "{m} <*> below (2 * n)";
  11.231 +  fix m assume hyp: "?P m"
  11.232 +  let ?t = "{m} <*> below (2 * n)"
  11.233  
  11.234 -  have "?B (Suc m) = ?t Un ?B m"; by (simp add: Sigma_Suc);
  11.235 -  also; have "... : ?T";
  11.236 -  proof (rule tiling_Un [rule_format]);
  11.237 -    show "?t : ?T"; by (rule dominoes_tile_row);
  11.238 -    from hyp; show "?B m : ?T"; .;
  11.239 -    show "?t Int ?B m = {}"; by blast;
  11.240 -  qed;
  11.241 -  finally; show "?P (Suc m)"; .;
  11.242 -qed;
  11.243 +  have "?B (Suc m) = ?t Un ?B m" by (simp add: Sigma_Suc)
  11.244 +  also have "... : ?T"
  11.245 +  proof (rule tiling_Un [rule_format])
  11.246 +    show "?t : ?T" by (rule dominoes_tile_row)
  11.247 +    from hyp show "?B m : ?T" .
  11.248 +    show "?t Int ?B m = {}" by blast
  11.249 +  qed
  11.250 +  finally show "?P (Suc m)" .
  11.251 +qed
  11.252  
  11.253  lemma domino_singleton:
  11.254 -  "d : domino ==> b < 2 ==> EX i j. evnodd d b = {(i, j)}";
  11.255 -proof -;
  11.256 -  assume b: "b < 2";
  11.257 -  assume "d : domino";
  11.258 -  thus ?thesis (is "?P d");
  11.259 -  proof induct;
  11.260 -    from b; have b_cases: "b = 0 | b = 1"; by arith;
  11.261 -    fix i j;
  11.262 -    note [simp] = evnodd_empty evnodd_insert mod_Suc;
  11.263 -    from b_cases; show "?P {(i, j), (i, j + 1)}"; by rule auto;
  11.264 -    from b_cases; show "?P {(i, j), (i + 1, j)}"; by rule auto;
  11.265 -  qed;
  11.266 -qed;
  11.267 +  "d : domino ==> b < 2 ==> EX i j. evnodd d b = {(i, j)}"
  11.268 +proof -
  11.269 +  assume b: "b < 2"
  11.270 +  assume "d : domino"
  11.271 +  thus ?thesis (is "?P d")
  11.272 +  proof induct
  11.273 +    from b have b_cases: "b = 0 | b = 1" by arith
  11.274 +    fix i j
  11.275 +    note [simp] = evnodd_empty evnodd_insert mod_Suc
  11.276 +    from b_cases show "?P {(i, j), (i, j + 1)}" by rule auto
  11.277 +    from b_cases show "?P {(i, j), (i + 1, j)}" by rule auto
  11.278 +  qed
  11.279 +qed
  11.280  
  11.281 -lemma domino_finite: "d: domino ==> finite d";
  11.282 -proof (induct set: domino);
  11.283 -  fix i j :: nat;
  11.284 -  show "finite {(i, j), (i, j + 1)}"; by (intro Finites.intros);
  11.285 -  show "finite {(i, j), (i + 1, j)}"; by (intro Finites.intros);
  11.286 -qed;
  11.287 +lemma domino_finite: "d: domino ==> finite d"
  11.288 +proof (induct set: domino)
  11.289 +  fix i j :: nat
  11.290 +  show "finite {(i, j), (i, j + 1)}" by (intro Finites.intros)
  11.291 +  show "finite {(i, j), (i + 1, j)}" by (intro Finites.intros)
  11.292 +qed
  11.293  
  11.294  
  11.295 -subsection {* Tilings of dominoes *};
  11.296 +subsection {* Tilings of dominoes *}
  11.297  
  11.298  lemma tiling_domino_finite:
  11.299 -  "t : tiling domino ==> finite t" (is "t : ?T ==> ?F t");
  11.300 -proof -;
  11.301 -  assume "t : ?T";
  11.302 -  thus "?F t";
  11.303 -  proof induct;
  11.304 -    show "?F {}"; by (rule Finites.emptyI);
  11.305 -    fix a t; assume "?F t";
  11.306 -    assume "a : domino"; hence "?F a"; by (rule domino_finite);
  11.307 -    thus "?F (a Un t)"; by (rule finite_UnI);
  11.308 -  qed;
  11.309 -qed;
  11.310 +  "t : tiling domino ==> finite t" (is "t : ?T ==> ?F t")
  11.311 +proof -
  11.312 +  assume "t : ?T"
  11.313 +  thus "?F t"
  11.314 +  proof induct
  11.315 +    show "?F {}" by (rule Finites.emptyI)
  11.316 +    fix a t assume "?F t"
  11.317 +    assume "a : domino" hence "?F a" by (rule domino_finite)
  11.318 +    thus "?F (a Un t)" by (rule finite_UnI)
  11.319 +  qed
  11.320 +qed
  11.321  
  11.322  lemma tiling_domino_01:
  11.323    "t : tiling domino ==> card (evnodd t 0) = card (evnodd t 1)"
  11.324 -  (is "t : ?T ==> ?P t");
  11.325 -proof -;
  11.326 -  assume "t : ?T";
  11.327 -  thus "?P t";
  11.328 -  proof induct;
  11.329 -    show "?P {}"; by (simp add: evnodd_def);
  11.330 +  (is "t : ?T ==> ?P t")
  11.331 +proof -
  11.332 +  assume "t : ?T"
  11.333 +  thus "?P t"
  11.334 +  proof induct
  11.335 +    show "?P {}" by (simp add: evnodd_def)
  11.336  
  11.337 -    fix a t;
  11.338 -    let ?e = evnodd;
  11.339 +    fix a t
  11.340 +    let ?e = evnodd
  11.341      assume "a : domino" "t : ?T"
  11.342        and hyp: "card (?e t 0) = card (?e t 1)"
  11.343 -      and "a <= - t";
  11.344 +      and "a <= - t"
  11.345  
  11.346      have card_suc:
  11.347 -      "!!b. b < 2 ==> card (?e (a Un t) b) = Suc (card (?e t b))";
  11.348 -    proof -;
  11.349 -      fix b; assume "b < 2";
  11.350 -      have "?e (a Un t) b = ?e a b Un ?e t b"; by (rule evnodd_Un);
  11.351 -      also; obtain i j where "?e a b = {(i, j)}";
  11.352 -      proof -;
  11.353 -	have "EX i j. ?e a b = {(i, j)}"; by (rule domino_singleton);
  11.354 -	thus ?thesis; by blast;
  11.355 -      qed;
  11.356 -      also; have "... Un ?e t b = insert (i, j) (?e t b)"; by simp;
  11.357 -      also; have "card ... = Suc (card (?e t b))";
  11.358 -      proof (rule card_insert_disjoint);
  11.359 -	show "finite (?e t b)";
  11.360 -          by (rule evnodd_finite, rule tiling_domino_finite);
  11.361 -	have "(i, j) : ?e a b"; by (simp!);
  11.362 -	thus "(i, j) ~: ?e t b"; by (blast! dest: evnoddD);
  11.363 -      qed;
  11.364 -      finally; show "?thesis b"; .;
  11.365 -    qed;
  11.366 -    hence "card (?e (a Un t) 0) = Suc (card (?e t 0))"; by simp;
  11.367 -    also; from hyp; have "card (?e t 0) = card (?e t 1)"; .;
  11.368 -    also; from card_suc; have "Suc ... = card (?e (a Un t) 1)";
  11.369 -      by simp;
  11.370 -    finally; show "?P (a Un t)"; .;
  11.371 -  qed;
  11.372 -qed;
  11.373 +      "!!b. b < 2 ==> card (?e (a Un t) b) = Suc (card (?e t b))"
  11.374 +    proof -
  11.375 +      fix b assume "b < 2"
  11.376 +      have "?e (a Un t) b = ?e a b Un ?e t b" by (rule evnodd_Un)
  11.377 +      also obtain i j where "?e a b = {(i, j)}"
  11.378 +      proof -
  11.379 +	have "EX i j. ?e a b = {(i, j)}" by (rule domino_singleton)
  11.380 +	thus ?thesis by blast
  11.381 +      qed
  11.382 +      also have "... Un ?e t b = insert (i, j) (?e t b)" by simp
  11.383 +      also have "card ... = Suc (card (?e t b))"
  11.384 +      proof (rule card_insert_disjoint)
  11.385 +	show "finite (?e t b)"
  11.386 +          by (rule evnodd_finite, rule tiling_domino_finite)
  11.387 +	have "(i, j) : ?e a b" by (simp!)
  11.388 +	thus "(i, j) ~: ?e t b" by (blast! dest: evnoddD)
  11.389 +      qed
  11.390 +      finally show "?thesis b" .
  11.391 +    qed
  11.392 +    hence "card (?e (a Un t) 0) = Suc (card (?e t 0))" by simp
  11.393 +    also from hyp have "card (?e t 0) = card (?e t 1)" .
  11.394 +    also from card_suc have "Suc ... = card (?e (a Un t) 1)"
  11.395 +      by simp
  11.396 +    finally show "?P (a Un t)" .
  11.397 +  qed
  11.398 +qed
  11.399  
  11.400  
  11.401 -subsection {* Main theorem *};
  11.402 +subsection {* Main theorem *}
  11.403  
  11.404  constdefs
  11.405    mutilated_board :: "nat => nat => (nat * nat) set"
  11.406    "mutilated_board m n ==
  11.407      below (2 * (m + 1)) <*> below (2 * (n + 1))
  11.408 -      - {(0, 0)} - {(2 * m + 1, 2 * n + 1)}";
  11.409 +      - {(0, 0)} - {(2 * m + 1, 2 * n + 1)}"
  11.410  
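Both deleted squares have the same colour: (0 + 0) mod 2 = 0 and ((2 * m + 1) + (2 * n + 1)) mod 2 = (2 * (m + n + 1)) mod 2 = 0.  Hence the mutilated board has two fewer squares of colour 0 than of colour 1, whereas tiling_domino_01 forces the two counts to be equal for any domino tiling; this is exactly the contradiction derived in the proof below.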
  11.411 -theorem mutil_not_tiling: "mutilated_board m n ~: tiling domino";
  11.412 -proof (unfold mutilated_board_def);
  11.413 -  let ?T = "tiling domino";
  11.414 -  let ?t = "below (2 * (m + 1)) <*> below (2 * (n + 1))";
  11.415 -  let ?t' = "?t - {(0, 0)}";
  11.416 -  let ?t'' = "?t' - {(2 * m + 1, 2 * n + 1)}";
  11.417 +theorem mutil_not_tiling: "mutilated_board m n ~: tiling domino"
  11.418 +proof (unfold mutilated_board_def)
  11.419 +  let ?T = "tiling domino"
  11.420 +  let ?t = "below (2 * (m + 1)) <*> below (2 * (n + 1))"
  11.421 +  let ?t' = "?t - {(0, 0)}"
  11.422 +  let ?t'' = "?t' - {(2 * m + 1, 2 * n + 1)}"
  11.423  
  11.424 -  show "?t'' ~: ?T";
  11.425 -  proof;
  11.426 -    have t: "?t : ?T"; by (rule dominoes_tile_matrix);
  11.427 -    assume t'': "?t'' : ?T";
  11.428 +  show "?t'' ~: ?T"
  11.429 +  proof
  11.430 +    have t: "?t : ?T" by (rule dominoes_tile_matrix)
  11.431 +    assume t'': "?t'' : ?T"
  11.432  
  11.433 -    let ?e = evnodd;
  11.434 -    have fin: "finite (?e ?t 0)";
  11.435 -      by (rule evnodd_finite, rule tiling_domino_finite, rule t);
  11.436 +    let ?e = evnodd
  11.437 +    have fin: "finite (?e ?t 0)"
  11.438 +      by (rule evnodd_finite, rule tiling_domino_finite, rule t)
  11.439  
  11.440 -    note [simp] = evnodd_iff evnodd_empty evnodd_insert evnodd_Diff;
  11.441 -    have "card (?e ?t'' 0) < card (?e ?t' 0)";
  11.442 -    proof -;
  11.443 +    note [simp] = evnodd_iff evnodd_empty evnodd_insert evnodd_Diff
  11.444 +    have "card (?e ?t'' 0) < card (?e ?t' 0)"
  11.445 +    proof -
  11.446        have "card (?e ?t' 0 - {(2 * m + 1, 2 * n + 1)})
  11.447 -        < card (?e ?t' 0)";
  11.448 -      proof (rule card_Diff1_less);
  11.449 -	from _ fin; show "finite (?e ?t' 0)";
  11.450 -          by (rule finite_subset) auto;
  11.451 -	show "(2 * m + 1, 2 * n + 1) : ?e ?t' 0"; by simp;
  11.452 -      qed;
  11.453 -      thus ?thesis; by simp;
  11.454 -    qed;
  11.455 -    also; have "... < card (?e ?t 0)";
  11.456 -    proof -;
  11.457 -      have "(0, 0) : ?e ?t 0"; by simp;
  11.458 -      with fin; have "card (?e ?t 0 - {(0, 0)}) < card (?e ?t 0)";
  11.459 -        by (rule card_Diff1_less);
  11.460 -      thus ?thesis; by simp;
  11.461 -    qed;
  11.462 -    also; from t; have "... = card (?e ?t 1)";
  11.463 -      by (rule tiling_domino_01);
  11.464 -    also; have "?e ?t 1 = ?e ?t'' 1"; by simp;
  11.465 -    also; from t''; have "card ... = card (?e ?t'' 0)";
  11.466 -      by (rule tiling_domino_01 [symmetric]);
  11.467 -    finally; have "... < ..."; .; thus False; ..;
  11.468 -  qed;
  11.469 -qed;
  11.470 +        < card (?e ?t' 0)"
  11.471 +      proof (rule card_Diff1_less)
  11.472 +	from _ fin show "finite (?e ?t' 0)"
  11.473 +          by (rule finite_subset) auto
  11.474 +	show "(2 * m + 1, 2 * n + 1) : ?e ?t' 0" by simp
  11.475 +      qed
  11.476 +      thus ?thesis by simp
  11.477 +    qed
  11.478 +    also have "... < card (?e ?t 0)"
  11.479 +    proof -
  11.480 +      have "(0, 0) : ?e ?t 0" by simp
  11.481 +      with fin have "card (?e ?t 0 - {(0, 0)}) < card (?e ?t 0)"
  11.482 +        by (rule card_Diff1_less)
  11.483 +      thus ?thesis by simp
  11.484 +    qed
  11.485 +    also from t have "... = card (?e ?t 1)"
  11.486 +      by (rule tiling_domino_01)
  11.487 +    also have "?e ?t 1 = ?e ?t'' 1" by simp
  11.488 +    also from t'' have "card ... = card (?e ?t'' 0)"
  11.489 +      by (rule tiling_domino_01 [symmetric])
  11.490 +    finally have "... < ..." . thus False ..
  11.491 +  qed
  11.492 +qed
  11.493  
  11.494 -end;
  11.495 +end
    12.1 --- a/src/HOL/Isar_examples/NestedDatatype.thy	Sun Sep 17 22:15:08 2000 +0200
    12.2 +++ b/src/HOL/Isar_examples/NestedDatatype.thy	Sun Sep 17 22:19:02 2000 +0200
    12.3 @@ -1,91 +1,91 @@
    12.4  
    12.5 -header {* Nested datatypes *};
    12.6 +header {* Nested datatypes *}
    12.7  
    12.8 -theory NestedDatatype = Main:;
    12.9 +theory NestedDatatype = Main:
   12.10  
   12.11 -subsection {* Terms and substitution *};
   12.12 +subsection {* Terms and substitution *}
   12.13  
   12.14  datatype ('a, 'b) "term" =
   12.15      Var 'a
   12.16 -  | App 'b "('a, 'b) term list";
   12.17 +  | App 'b "('a, 'b) term list"
   12.18  
   12.19  consts
   12.20    subst_term :: "('a => ('a, 'b) term) => ('a, 'b) term => ('a, 'b) term"
   12.21    subst_term_list ::
   12.22 -    "('a => ('a, 'b) term) => ('a, 'b) term list => ('a, 'b) term list";
   12.23 +    "('a => ('a, 'b) term) => ('a, 'b) term list => ('a, 'b) term list"
   12.24  
   12.25  primrec (subst)
   12.26    "subst_term f (Var a) = f a"
   12.27    "subst_term f (App b ts) = App b (subst_term_list f ts)"
   12.28    "subst_term_list f [] = []"
   12.29 -  "subst_term_list f (t # ts) = subst_term f t # subst_term_list f ts";
   12.30 +  "subst_term_list f (t # ts) = subst_term f t # subst_term_list f ts"
   12.31  
   12.32  
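Unfolding these equations on a small term shows how the two functions interlock; with hypothetical variables a, a' and operator b,

  subst_term f (App b [Var a, Var a'])
    = App b (subst_term_list f [Var a, Var a'])
    = App b [subst_term f (Var a), subst_term f (Var a')]
    = App b [f a, f a'].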
   12.33  text {*
   12.34   \medskip A simple lemma about composition of substitutions.
   12.35 -*};
   12.36 +*}
   12.37  
   12.38  lemma
   12.39     "subst_term (subst_term f1 o f2) t =
   12.40        subst_term f1 (subst_term f2 t) &
   12.41      subst_term_list (subst_term f1 o f2) ts =
   12.42 -      subst_term_list f1 (subst_term_list f2 ts)";
   12.43 -  by (induct t and ts rule: term.induct) simp_all;
   12.44 +      subst_term_list f1 (subst_term_list f2 ts)"
   12.45 +  by (induct t and ts rule: term.induct) simp_all
   12.46  
   12.47  lemma "subst_term (subst_term f1 o f2) t =
   12.48 -  subst_term f1 (subst_term f2 t)";
   12.49 -proof -;
   12.50 -  let "?P t" = ?thesis;
   12.51 -  let ?Q = "\\<lambda>ts. subst_term_list (subst_term f1 o f2) ts =
   12.52 -    subst_term_list f1 (subst_term_list f2 ts)";
   12.53 -  show ?thesis;
   12.54 -  proof (induct t);
   12.55 -    fix a; show "?P (Var a)"; by simp;
   12.56 -  next;
   12.57 -    fix b ts; assume "?Q ts";
   12.58 -    thus "?P (App b ts)"; by (simp add: o_def);
   12.59 -  next;
   12.60 -    show "?Q []"; by simp;
   12.61 -  next;
   12.62 -    fix t ts;
   12.63 -    assume "?P t" "?Q ts"; thus "?Q (t # ts)"; by simp;
   12.64 -  qed;
   12.65 -qed;
   12.66 +  subst_term f1 (subst_term f2 t)"
   12.67 +proof -
   12.68 +  let "?P t" = ?thesis
   12.69 +  let ?Q = "\<lambda>ts. subst_term_list (subst_term f1 o f2) ts =
   12.70 +    subst_term_list f1 (subst_term_list f2 ts)"
   12.71 +  show ?thesis
   12.72 +  proof (induct t)
   12.73 +    fix a show "?P (Var a)" by simp
   12.74 +  next
   12.75 +    fix b ts assume "?Q ts"
   12.76 +    thus "?P (App b ts)" by (simp add: o_def)
   12.77 +  next
   12.78 +    show "?Q []" by simp
   12.79 +  next
   12.80 +    fix t ts
   12.81 +    assume "?P t" "?Q ts" thus "?Q (t # ts)" by simp
   12.82 +  qed
   12.83 +qed
   12.84  
   12.85  
   12.86 -subsection {* Alternative induction *};
   12.87 +subsection {* Alternative induction *}
   12.88  
   12.89  theorem term_induct' [case_names Var App]:
   12.90    "(!!a. P (Var a)) ==>
   12.91 -   (!!b ts. list_all P ts ==> P (App b ts)) ==> P t";
   12.92 -proof -;
   12.93 -  assume var: "!!a. P (Var a)";
   12.94 -  assume app: "!!b ts. list_all P ts ==> P (App b ts)";
   12.95 -  show ?thesis;
   12.96 -  proof (induct P t);
   12.97 -    fix a; show "P (Var a)"; by (rule var);
   12.98 -  next;
   12.99 -    fix b t ts; assume "list_all P ts";
  12.100 -    thus "P (App b ts)"; by (rule app);
  12.101 -  next;
  12.102 -    show "list_all P []"; by simp;
  12.103 -  next;
  12.104 -    fix t ts; assume "P t" "list_all P ts";
  12.105 -    thus "list_all P (t # ts)"; by simp;
  12.106 -  qed;
  12.107 -qed;
  12.108 +   (!!b ts. list_all P ts ==> P (App b ts)) ==> P t"
  12.109 +proof -
  12.110 +  assume var: "!!a. P (Var a)"
  12.111 +  assume app: "!!b ts. list_all P ts ==> P (App b ts)"
  12.112 +  show ?thesis
  12.113 +  proof (induct P t)
  12.114 +    fix a show "P (Var a)" by (rule var)
  12.115 +  next
  12.116 +    fix b t ts assume "list_all P ts"
  12.117 +    thus "P (App b ts)" by (rule app)
  12.118 +  next
  12.119 +    show "list_all P []" by simp
  12.120 +  next
  12.121 +    fix t ts assume "P t" "list_all P ts"
  12.122 +    thus "list_all P (t # ts)" by simp
  12.123 +  qed
  12.124 +qed
  12.125  
  12.126  lemma
  12.127    "subst_term (subst_term f1 o f2) t = subst_term f1 (subst_term f2 t)"
  12.128 -  (is "?P t");
  12.129 -proof (induct (open) ?P t rule: term_induct');
  12.130 -  case Var;
  12.131 -  show "?P (Var a)"; by (simp add: o_def);
  12.132 -next;
  12.133 -  case App;
  12.134 -  have "?this --> ?P (App b ts)";
  12.135 -    by (induct ts) simp_all;
  12.136 -  thus "..."; ..;
  12.137 -qed;
  12.138 +  (is "?P t")
  12.139 +proof (induct (open) ?P t rule: term_induct')
  12.140 +  case Var
  12.141 +  show "?P (Var a)" by (simp add: o_def)
  12.142 +next
  12.143 +  case App
  12.144 +  have "?this --> ?P (App b ts)"
  12.145 +    by (induct ts) simp_all
  12.146 +  thus "..." ..
  12.147 +qed
  12.148  
  12.149 -end;
  12.150 +end
    13.1 --- a/src/HOL/Isar_examples/Peirce.thy	Sun Sep 17 22:15:08 2000 +0200
    13.2 +++ b/src/HOL/Isar_examples/Peirce.thy	Sun Sep 17 22:19:02 2000 +0200
    13.3 @@ -3,9 +3,9 @@
    13.4      Author:     Markus Wenzel, TU Muenchen
    13.5  *)
    13.6  
    13.7 -header {* Peirce's Law *};
    13.8 +header {* Peirce's Law *}
    13.9  
   13.10 -theory Peirce = Main:;
   13.11 +theory Peirce = Main:
   13.12  
   13.13  text {*
   13.14   We consider Peirce's Law: $((A \impl B) \impl A) \impl A$.  This is
   13.15 @@ -17,22 +17,22 @@
   13.16   the negated goal may be introduced as an additional assumption.  This
   13.17   eventually leads to a contradiction.\footnote{The rule involved there
   13.18   is negation elimination; it holds in intuitionistic logic as well.}
   13.19 -*};
   13.20 +*}
   13.21  
   13.22 -theorem "((A --> B) --> A) --> A";
   13.23 -proof;
   13.24 -  assume aba: "(A --> B) --> A";
   13.25 -  show A;
   13.26 -  proof (rule classical);
   13.27 -    assume "~ A";
   13.28 -    have "A --> B";
   13.29 -    proof;
   13.30 -      assume A;
   13.31 -      thus B; by contradiction;
   13.32 -    qed;
   13.33 -    with aba; show A; ..;
   13.34 -  qed;
   13.35 -qed;
   13.36 +theorem "((A --> B) --> A) --> A"
   13.37 +proof
   13.38 +  assume aba: "(A --> B) --> A"
   13.39 +  show A
   13.40 +  proof (rule classical)
   13.41 +    assume "~ A"
   13.42 +    have "A --> B"
   13.43 +    proof
   13.44 +      assume A
   13.45 +      thus B by contradiction
   13.46 +    qed
   13.47 +    with aba show A ..
   13.48 +  qed
   13.49 +qed
   13.50  
   13.51  text {*
   13.52   In the subsequent version the reasoning is rearranged by means of
   13.53 @@ -48,24 +48,24 @@
   13.54   give rise to new subgoals, which may be established separately.  In
   13.55   contrast, strong assumptions (as introduced by \isacommand{assume})
   13.56   are solved immediately.
   13.57 -*};
   13.58 +*}
   13.59  
   13.60 -theorem "((A --> B) --> A) --> A";
   13.61 -proof;
   13.62 -  assume aba: "(A --> B) --> A";
   13.63 -  show A;
   13.64 -  proof (rule classical);
   13.65 -    presume "A --> B";
   13.66 -    with aba; show A; ..;
   13.67 -  next;
   13.68 -    assume "~ A";
   13.69 -    show "A --> B";
   13.70 -    proof;
   13.71 -      assume A;
   13.72 -      thus B; by contradiction;
   13.73 -    qed;
   13.74 -  qed;
   13.75 -qed;
   13.76 +theorem "((A --> B) --> A) --> A"
   13.77 +proof
   13.78 +  assume aba: "(A --> B) --> A"
   13.79 +  show A
   13.80 +  proof (rule classical)
   13.81 +    presume "A --> B"
   13.82 +    with aba show A ..
   13.83 +  next
   13.84 +    assume "~ A"
   13.85 +    show "A --> B"
   13.86 +    proof
   13.87 +      assume A
   13.88 +      thus B by contradiction
   13.89 +    qed
   13.90 +  qed
   13.91 +qed
   13.92  
   13.93  text {*
    13.94   Note that the goals stemming from weak assumptions may even be left
   13.95 @@ -84,6 +84,6 @@
    13.96   assumptions these may still be solved in any order in a predictable
   13.97   way, while weak ones would quickly lead to great confusion,
   13.98   eventually demanding even some backtracking.
   13.99 -*};
  13.100 +*}
  13.101  
  13.102 -end;
  13.103 +end
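
Aside (an informal sketch, not taken from the theory sources): the classical step used in both proofs above can be read as follows. Under the extra assumption $\neg A$ supplied by \emph{rule classical}, a hypothetical $A$ contradicts $\neg A$ and hence yields $B$ by negation elimination, so $A \to B$ becomes available; feeding it into the premise $(A \to B) \to A$ discharges the pending goal $A$:
\[
  \neg A,\; A \;\vdash\; B
  \quad\Longrightarrow\quad
  \neg A \;\vdash\; A \to B
  \quad\Longrightarrow\quad
  (A \to B) \to A,\; \neg A \;\vdash\; A .
\]
The same derivation underlies the second version; only the point at which $A \to B$ is stated (via \isacommand{presume}) changes.
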
    14.1 --- a/src/HOL/Isar_examples/Puzzle.thy	Sun Sep 17 22:15:08 2000 +0200
    14.2 +++ b/src/HOL/Isar_examples/Puzzle.thy	Sun Sep 17 22:19:02 2000 +0200
    14.3 @@ -1,128 +1,128 @@
    14.4  
    14.5 -header {* An old chestnut *};
    14.6 +header {* An old chestnut *}
    14.7  
    14.8 -theory Puzzle = Main:;
    14.9 +theory Puzzle = Main:
   14.10  
   14.11  text_raw {*
   14.12   \footnote{A question from ``Bundeswettbewerb Mathematik''.
   14.13   Original pen-and-paper proof due to Herbert Ehler; Isabelle tactic
   14.14   script by Tobias Nipkow.}
   14.15 -*};
   14.16 +*}
   14.17  
   14.18  
   14.19 -subsection {* Generalized mathematical induction *};
   14.20 +subsection {* Generalized mathematical induction *}
   14.21  
   14.22  text {*
   14.23   The following derived rule admits induction over some expression
   14.24   $f(x)$ wrt.\ the ${<}$ relation on natural numbers.
   14.25 -*};
   14.26 +*}
   14.27  
   14.28  lemma gen_less_induct:
   14.29    "(!!x. ALL y. f y < f x --> P y (f y) ==> P x (f x))
   14.30      ==> P x (f x :: nat)"
   14.31 -  (is "(!!x. ?H x ==> ?C x) ==> _");
   14.32 -proof -;
   14.33 -  assume asm: "!!x. ?H x ==> ?C x";
   14.34 -  {;
   14.35 -    fix k;
   14.36 -    have "ALL x. k = f x --> ?C x" (is "?Q k");
   14.37 -    proof (rule nat_less_induct);
   14.38 -      fix k; assume hyp: "ALL m<k. ?Q m";
   14.39 -      show "?Q k";
   14.40 -      proof;
   14.41 -	fix x; show "k = f x --> ?C x";
   14.42 -	proof;
   14.43 -	  assume "k = f x";
   14.44 -	  with hyp; have "?H x"; by blast;
   14.45 -	  thus "?C x"; by (rule asm);
   14.46 -	qed;
   14.47 -      qed;
   14.48 -    qed;
   14.49 -  };
   14.50 -  thus "?C x"; by simp;
   14.51 -qed;
   14.52 +  (is "(!!x. ?H x ==> ?C x) ==> _")
   14.53 +proof -
   14.54 +  assume asm: "!!x. ?H x ==> ?C x"
   14.55 +  {
   14.56 +    fix k
   14.57 +    have "ALL x. k = f x --> ?C x" (is "?Q k")
   14.58 +    proof (rule nat_less_induct)
   14.59 +      fix k assume hyp: "ALL m<k. ?Q m"
   14.60 +      show "?Q k"
   14.61 +      proof
   14.62 +	fix x show "k = f x --> ?C x"
   14.63 +	proof
   14.64 +	  assume "k = f x"
   14.65 +	  with hyp have "?H x" by blast
   14.66 +	  thus "?C x" by (rule asm)
   14.67 +	qed
   14.68 +      qed
   14.69 +    qed
   14.70 +  }
   14.71 +  thus "?C x" by simp
   14.72 +qed
   14.73  
   14.74  
   14.75 -subsection {* The problem *};
   14.76 +subsection {* The problem *}
   14.77  
   14.78  text {*
   14.79   Given some function $f\colon \Nat \to \Nat$ such that $f \ap (f \ap
   14.80   n) < f \ap (\idt{Suc} \ap n)$ for all $n$.  Demonstrate that $f$ is
   14.81   the identity.
   14.82 -*};
   14.83 +*}
   14.84  
   14.85 -consts f :: "nat => nat";
   14.86 -axioms f_ax: "f (f n) < f (Suc n)";
   14.87 +consts f :: "nat => nat"
   14.88 +axioms f_ax: "f (f n) < f (Suc n)"
   14.89  
   14.90 -theorem "f n = n";
   14.91 -proof (rule order_antisym);
   14.92 +theorem "f n = n"
   14.93 +proof (rule order_antisym)
   14.94    txt {*
   14.95      Note that the generalized form of $n \le f \ap n$ is required
   14.96      later for monotonicity as well.
   14.97 -  *};
   14.98 -  show ge: "!!n. n <= f n";
   14.99 -  proof -;
  14.100 -    fix n;
  14.101 -    show "?thesis n" (is "?P n (f n)");
  14.102 -    proof (rule gen_less_induct [of f ?P]);
  14.103 -      fix n; assume hyp: "ALL m. f m < f n --> ?P m (f m)";
  14.104 -      show "?P n (f n)";
  14.105 -      proof (rule nat.exhaust);
  14.106 -	assume "n = 0"; thus ?thesis; by simp;
  14.107 -      next;
  14.108 -	fix m; assume n_Suc: "n = Suc m";
  14.109 -	from f_ax; have "f (f m) < f (Suc m)"; .;
  14.110 -	with hyp n_Suc; have "f m <= f (f m)"; by blast;
  14.111 -	also; from f_ax; have "... < f (Suc m)"; .;
  14.112 -	finally; have lt: "f m < f (Suc m)"; .;
  14.113 -	with hyp n_Suc; have "m <= f m"; by blast;
  14.114 -	also; note lt;
  14.115 -	finally; have "m < f (Suc m)"; .;
  14.116 -	thus "n <= f n"; by (simp only: n_Suc);
  14.117 -      qed;
  14.118 -    qed;
  14.119 -  qed;
  14.120 +  *}
  14.121 +  show ge: "!!n. n <= f n"
  14.122 +  proof -
  14.123 +    fix n
  14.124 +    show "?thesis n" (is "?P n (f n)")
  14.125 +    proof (rule gen_less_induct [of f ?P])
  14.126 +      fix n assume hyp: "ALL m. f m < f n --> ?P m (f m)"
  14.127 +      show "?P n (f n)"
  14.128 +      proof (rule nat.exhaust)
  14.129 +	assume "n = 0" thus ?thesis by simp
  14.130 +      next
  14.131 +	fix m assume n_Suc: "n = Suc m"
  14.132 +	from f_ax have "f (f m) < f (Suc m)" .
  14.133 +	with hyp n_Suc have "f m <= f (f m)" by blast
  14.134 +	also from f_ax have "... < f (Suc m)" .
  14.135 +	finally have lt: "f m < f (Suc m)" .
  14.136 +	with hyp n_Suc have "m <= f m" by blast
  14.137 +	also note lt
  14.138 +	finally have "m < f (Suc m)" .
  14.139 +	thus "n <= f n" by (simp only: n_Suc)
  14.140 +      qed
  14.141 +    qed
  14.142 +  qed
  14.143  
  14.144    txt {*
  14.145      In order to show the other direction, we first establish
  14.146      monotonicity of $f$.
  14.147 -  *};
  14.148 -  have mono: "!!m n. m <= n --> f m <= f n";
  14.149 -  proof -;
  14.150 -    fix m n;
  14.151 -    show "?thesis m n" (is "?P n");
  14.152 -    proof (induct n);
  14.153 -      show "?P 0"; by simp;
  14.154 -      fix n; assume hyp: "?P n";
  14.155 -      show "?P (Suc n)";
  14.156 -      proof;
  14.157 -	assume "m <= Suc n";
  14.158 -	thus "f m <= f (Suc n)";
  14.159 -	proof (rule le_SucE);
  14.160 -	  assume "m <= n";
  14.161 -	  with hyp; have "f m <= f n"; ..;
  14.162 -	  also; from ge f_ax; have "... < f (Suc n)";
  14.163 -	    by (rule le_less_trans);
  14.164 -	  finally; show ?thesis; by simp;
  14.165 -	next;
  14.166 -	  assume "m = Suc n";
  14.167 -	  thus ?thesis; by simp;
  14.168 -	qed;
  14.169 -      qed;
  14.170 -    qed;
  14.171 -  qed;
  14.172 +  *}
  14.173 +  have mono: "!!m n. m <= n --> f m <= f n"
  14.174 +  proof -
  14.175 +    fix m n
  14.176 +    show "?thesis m n" (is "?P n")
  14.177 +    proof (induct n)
  14.178 +      show "?P 0" by simp
  14.179 +      fix n assume hyp: "?P n"
  14.180 +      show "?P (Suc n)"
  14.181 +      proof
  14.182 +	assume "m <= Suc n"
  14.183 +	thus "f m <= f (Suc n)"
  14.184 +	proof (rule le_SucE)
  14.185 +	  assume "m <= n"
  14.186 +	  with hyp have "f m <= f n" ..
  14.187 +	  also from ge f_ax have "... < f (Suc n)"
  14.188 +	    by (rule le_less_trans)
  14.189 +	  finally show ?thesis by simp
  14.190 +	next
  14.191 +	  assume "m = Suc n"
  14.192 +	  thus ?thesis by simp
  14.193 +	qed
  14.194 +      qed
  14.195 +    qed
  14.196 +  qed
  14.197  
  14.198 -  show "f n <= n";
  14.199 -  proof (rule leI);
  14.200 -    show "~ n < f n";
  14.201 -    proof;
  14.202 -      assume "n < f n";
  14.203 -      hence "Suc n <= f n"; by (rule Suc_leI);
  14.204 -      hence "f (Suc n) <= f (f n)"; by (rule mono [rule_format]);
  14.205 -      also; have "... < f (Suc n)"; by (rule f_ax);
  14.206 -      finally; have "... < ..."; .; thus False; ..;
  14.207 -    qed;
  14.208 -  qed;
  14.209 -qed;
  14.210 +  show "f n <= n"
  14.211 +  proof (rule leI)
  14.212 +    show "~ n < f n"
  14.213 +    proof
  14.214 +      assume "n < f n"
  14.215 +      hence "Suc n <= f n" by (rule Suc_leI)
  14.216 +      hence "f (Suc n) <= f (f n)" by (rule mono [rule_format])
  14.217 +      also have "... < f (Suc n)" by (rule f_ax)
  14.218 +      finally have "... < ..." . thus False ..
  14.219 +    qed
  14.220 +  qed
  14.221 +qed
  14.222  
  14.223 -end;
  14.224 +end
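
As an informal gloss on the first half of the theorem above (the direction $n \le f\,n$), the induction step for $n = m + 1$ combines the axiom \texttt{f\_ax} with two uses of the induction hypothesis into the chain
\[
  m \;\le\; f\,m \;\le\; f\,(f\,m) \;<\; f\,(m + 1),
\]
from which $m + 1 \le f\,(m + 1)$ follows; the hypothesis is applicable to $f\,m$ and to $m$ because both $f\,(f\,m)$ and $f\,m$ lie below $f\,(m + 1)$.
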
    15.1 --- a/src/HOL/Isar_examples/Summation.thy	Sun Sep 17 22:15:08 2000 +0200
    15.2 +++ b/src/HOL/Isar_examples/Summation.thy	Sun Sep 17 22:19:02 2000 +0200
    15.3 @@ -3,26 +3,26 @@
    15.4      Author:     Markus Wenzel
    15.5  *)
    15.6  
    15.7 -header {* Summing natural numbers *};
    15.8 +header {* Summing natural numbers *}
    15.9  
   15.10 -theory Summation = Main:;
   15.11 +theory Summation = Main:
   15.12  
   15.13  text_raw {*
   15.14   \footnote{This example is somewhat reminiscent of the
   15.15   \url{http://isabelle.in.tum.de/library/HOL/ex/NatSum.html}, which is
   15.16   discussed in \cite{isabelle-ref} in the context of permutative
   15.17   rewrite rules and ordered rewriting.}
   15.18 -*};
   15.19 +*}
   15.20  
   15.21  text {*
   15.22   Subsequently, we prove some summation laws of natural numbers
   15.23   (including odds, squares, and cubes).  These examples demonstrate how
   15.24   plain natural deduction (including induction) may be combined with
   15.25   calculational proof.
   15.26 -*};
   15.27 +*}
   15.28  
   15.29  
   15.30 -subsection {* A summation operator *};
   15.31 +subsection {* A summation operator *}
   15.32  
   15.33  text {*
   15.34    The binder operator $\idt{sum} :: (\idt{nat} \To \idt{nat}) \To
   15.35 @@ -31,42 +31,42 @@
   15.36   \[
   15.37   \sum\limits_{i < k} f(i) = \idt{sum} \ap (\lam i f \ap i) \ap k
   15.38   \]
   15.39 -*};
   15.40 +*}
   15.41  
   15.42  consts
   15.43 -  sum :: "(nat => nat) => nat => nat";
   15.44 +  sum :: "(nat => nat) => nat => nat"
   15.45  
   15.46  primrec
   15.47    "sum f 0 = 0"
   15.48 -  "sum f (Suc n) = f n + sum f n";
   15.49 +  "sum f (Suc n) = f n + sum f n"
   15.50  
   15.51  syntax
   15.52    "_SUM" :: "idt => nat => nat => nat"
   15.53 -    ("SUM _ < _. _" [0, 0, 10] 10);
   15.54 +    ("SUM _ < _. _" [0, 0, 10] 10)
   15.55  translations
   15.56 -  "SUM i < k. b" == "sum (\\<lambda>i. b) k";
   15.57 +  "SUM i < k. b" == "sum (\<lambda>i. b) k"
   15.58  
   15.59  
   15.60 -subsection {* Summation laws *};
   15.61 +subsection {* Summation laws *}
   15.62  
   15.63  text {*
   15.64   The sum of natural numbers $0 + \cdots + n$ equals $n \times (n +
   15.65   1)/2$.  Avoiding formal reasoning about division we prove this
   15.66   equation multiplied by $2$.
   15.67 -*};
   15.68 +*}
   15.69  
   15.70  theorem sum_of_naturals:
   15.71    "2 * (SUM i < n + 1. i) = n * (n + 1)"
   15.72 -  (is "?P n" is "?S n = _");
   15.73 -proof (induct n);
   15.74 -  show "?P 0"; by simp;
   15.75 +  (is "?P n" is "?S n = _")
   15.76 +proof (induct n)
   15.77 +  show "?P 0" by simp
   15.78  
   15.79 -  fix n;
   15.80 -  have "?S (n + 1) = ?S n + 2 * (n + 1)"; by simp;
   15.81 -  also; assume "?S n = n * (n + 1)";
   15.82 -  also; have "... + 2 * (n + 1) = (n + 1) * (n + 2)"; by simp;
   15.83 -  finally; show "?P (Suc n)"; by simp;
   15.84 -qed;
   15.85 +  fix n
   15.86 +  have "?S (n + 1) = ?S n + 2 * (n + 1)" by simp
   15.87 +  also assume "?S n = n * (n + 1)"
   15.88 +  also have "... + 2 * (n + 1) = (n + 1) * (n + 2)" by simp
   15.89 +  finally show "?P (Suc n)" by simp
   15.90 +qed
   15.91  
   15.92  text {*
   15.93   The above proof is a typical instance of mathematical induction.  The
   15.94 @@ -103,63 +103,63 @@
   15.95   context elements of the form $x:A$ instead.
   15.96  
   15.97   \end{enumerate}
   15.98 -*};
   15.99 +*}
  15.100  
  15.101  text {*
  15.102   \medskip We derive further summation laws for odds, squares, and
  15.103   cubes as follows.  The basic technique of induction plus calculation
  15.104   is the same as before.
  15.105 -*};
  15.106 +*}
  15.107  
  15.108  theorem sum_of_odds:
  15.109    "(SUM i < n. 2 * i + 1) = n^2"
  15.110 -  (is "?P n" is "?S n = _");
  15.111 -proof (induct n);
  15.112 -  show "?P 0"; by simp;
  15.113 +  (is "?P n" is "?S n = _")
  15.114 +proof (induct n)
  15.115 +  show "?P 0" by simp
  15.116  
  15.117 -  fix n;
  15.118 -  have "?S (n + 1) = ?S n + 2 * n + 1"; by simp;
  15.119 -  also; assume "?S n = n^2";
  15.120 -  also; have "... + 2 * n + 1 = (n + 1)^2"; by simp;
  15.121 -  finally; show "?P (Suc n)"; by simp;
  15.122 -qed;
  15.123 +  fix n
  15.124 +  have "?S (n + 1) = ?S n + 2 * n + 1" by simp
  15.125 +  also assume "?S n = n^2"
  15.126 +  also have "... + 2 * n + 1 = (n + 1)^2" by simp
  15.127 +  finally show "?P (Suc n)" by simp
  15.128 +qed
  15.129  
  15.130  text {*
  15.131   Subsequently we require some additional tweaking of Isabelle built-in
  15.132   arithmetic simplifications, such as bringing in distributivity by
  15.133   hand.
  15.134 -*};
  15.135 +*}
  15.136  
  15.137 -lemmas distrib = add_mult_distrib add_mult_distrib2;
  15.138 +lemmas distrib = add_mult_distrib add_mult_distrib2
  15.139  
  15.140  theorem sum_of_squares:
  15.141    "#6 * (SUM i < n + 1. i^2) = n * (n + 1) * (2 * n + 1)"
  15.142 -  (is "?P n" is "?S n = _");
  15.143 -proof (induct n);
  15.144 -  show "?P 0"; by simp;
  15.145 +  (is "?P n" is "?S n = _")
  15.146 +proof (induct n)
  15.147 +  show "?P 0" by simp
  15.148  
  15.149 -  fix n;
  15.150 -  have "?S (n + 1) = ?S n + #6 * (n + 1)^2"; by (simp add: distrib);
  15.151 -  also; assume "?S n = n * (n + 1) * (2 * n + 1)";
  15.152 -  also; have "... + #6 * (n + 1)^2 =
  15.153 -    (n + 1) * (n + 2) * (2 * (n + 1) + 1)"; by (simp add: distrib);
  15.154 -  finally; show "?P (Suc n)"; by simp;
  15.155 -qed;
  15.156 +  fix n
  15.157 +  have "?S (n + 1) = ?S n + #6 * (n + 1)^2" by (simp add: distrib)
  15.158 +  also assume "?S n = n * (n + 1) * (2 * n + 1)"
  15.159 +  also have "... + #6 * (n + 1)^2 =
  15.160 +    (n + 1) * (n + 2) * (2 * (n + 1) + 1)" by (simp add: distrib)
  15.161 +  finally show "?P (Suc n)" by simp
  15.162 +qed
  15.163  
  15.164  theorem sum_of_cubes:
  15.165    "#4 * (SUM i < n + 1. i^#3) = (n * (n + 1))^2"
  15.166 -  (is "?P n" is "?S n = _");
  15.167 -proof (induct n);
  15.168 -  show "?P 0"; by (simp add: power_eq_if);
  15.169 +  (is "?P n" is "?S n = _")
  15.170 +proof (induct n)
  15.171 +  show "?P 0" by (simp add: power_eq_if)
  15.172  
  15.173 -  fix n;
  15.174 -  have "?S (n + 1) = ?S n + #4 * (n + 1)^#3";
  15.175 -    by (simp add: power_eq_if distrib);
  15.176 -  also; assume "?S n = (n * (n + 1))^2";
  15.177 -  also; have "... + #4 * (n + 1)^#3 = ((n + 1) * ((n + 1) + 1))^2";
  15.178 -    by (simp add: power_eq_if distrib);
  15.179 -  finally; show "?P (Suc n)"; by simp;
  15.180 -qed;
  15.181 +  fix n
  15.182 +  have "?S (n + 1) = ?S n + #4 * (n + 1)^#3"
  15.183 +    by (simp add: power_eq_if distrib)
  15.184 +  also assume "?S n = (n * (n + 1))^2"
  15.185 +  also have "... + #4 * (n + 1)^#3 = ((n + 1) * ((n + 1) + 1))^2"
  15.186 +    by (simp add: power_eq_if distrib)
  15.187 +  finally show "?P (Suc n)" by simp
  15.188 +qed
  15.189  
  15.190  text {*
  15.191   Comparing these examples with the tactic script version
  15.192 @@ -180,6 +180,6 @@
   15.193   such as $\idt{simp}$ or $\idt{auto}$ should normally never be used as
  15.194   initial proof methods, but only as terminal ones, solving certain
  15.195   goals completely.
  15.196 -*};
  15.197 +*}
  15.198  
  15.199 -end;
  15.200 +end
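
Two informal remarks on the theory above. First, the \isacommand{primrec} equations sum strictly below the bound, e.g.
\[
  \idt{sum}\;f\;3 \;=\; f\,2 + (f\,1 + (f\,0 + 0)),
\]
so $\mathrm{SUM}\ i < k.\ f\,i$ ranges over $i = 0, \dots, k - 1$. Second, writing $S(n)$ for the left-hand side $2 \cdot (\mathrm{SUM}\ i < n + 1.\ i)$ of \texttt{sum\_of\_naturals}, the calculational induction step amounts to the arithmetic identity
\[
  S(n + 1) \;=\; S(n) + 2\,(n + 1) \;=\; n\,(n + 1) + 2\,(n + 1) \;=\; (n + 1)\,(n + 2),
\]
and the remaining summation laws follow the same pattern with different polynomials.
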
    16.1 --- a/src/HOL/Isar_examples/W_correct.thy	Sun Sep 17 22:15:08 2000 +0200
    16.2 +++ b/src/HOL/Isar_examples/W_correct.thy	Sun Sep 17 22:19:02 2000 +0200
    16.3 @@ -5,76 +5,76 @@
    16.4  Correctness of Milner's type inference algorithm W (let-free version).
    16.5  *)
    16.6  
    16.7 -header {* Milner's type inference algorithm~W (let-free version) *};
    16.8 +header {* Milner's type inference algorithm~W (let-free version) *}
    16.9  
   16.10 -theory W_correct = Main + Type:;
   16.11 +theory W_correct = Main + Type:
   16.12  
   16.13  text_raw {*
   16.14    \footnote{Based upon \url{http://isabelle.in.tum.de/library/HOL/W0}
   16.15    by Dieter Nazareth and Tobias Nipkow.}
   16.16 -*};
   16.17 +*}
   16.18  
   16.19  
   16.20 -subsection "Mini ML with type inference rules";
   16.21 +subsection "Mini ML with type inference rules"
   16.22  
   16.23  datatype
   16.24 -  expr = Var nat | Abs expr | App expr expr;
   16.25 +  expr = Var nat | Abs expr | App expr expr
   16.26  
   16.27  
   16.28 -text {* Type inference rules. *};
   16.29 +text {* Type inference rules. *}
   16.30  
   16.31  consts
   16.32 -  has_type :: "(typ list * expr * typ) set";
   16.33 +  has_type :: "(typ list * expr * typ) set"
   16.34  
   16.35  syntax
   16.36    "_has_type" :: "[typ list, expr, typ] => bool"
   16.37 -    ("((_) |-/ (_) :: (_))" [60, 0, 60] 60);
   16.38 +    ("((_) |-/ (_) :: (_))" [60, 0, 60] 60)
   16.39  translations
   16.40 -  "a |- e :: t" == "(a,e,t) : has_type";
   16.41 +  "a |- e :: t" == "(a,e,t) : has_type"
   16.42  
   16.43  inductive has_type
   16.44    intros [simp]
   16.45      Var: "n < length a ==> a |- Var n :: a ! n"
   16.46      Abs: "t1#a |- e :: t2 ==> a |- Abs e :: t1 -> t2"
   16.47      App: "a |- e1 :: t2 -> t1 ==> a |- e2 :: t2
   16.48 -      ==> a |- App e1 e2 :: t1";
   16.49 +      ==> a |- App e1 e2 :: t1"
   16.50  
   16.51  
    16.52 -text {* Type assignment is closed wrt.\ substitution. *};
    16.53 +text {* Type assignment is closed wrt.\ substitution. *}
   16.54  
   16.55 -lemma has_type_subst_closed: "a |- e :: t ==> $s a |- e :: $s t";
   16.56 -proof -;
   16.57 -  assume "a |- e :: t";
   16.58 -  thus ?thesis (is "?P a e t");
   16.59 -  proof (induct (open) ?P a e t);
   16.60 -    case Var;
   16.61 -    hence "n < length (map ($ s) a)"; by simp;
   16.62 -    hence "map ($ s) a |- Var n :: map ($ s) a ! n";
   16.63 -      by (rule has_type.Var);
   16.64 -    also; have "map ($ s) a ! n = $ s (a ! n)";
   16.65 -      by (rule nth_map);
   16.66 -    also; have "map ($ s) a = $ s a";
   16.67 -      by (simp only: app_subst_list);
   16.68 -    finally; show "?P a (Var n) (a ! n)"; .;
   16.69 -  next;
   16.70 -    case Abs;
   16.71 -    hence "$ s t1 # map ($ s) a |- e :: $ s t2";
   16.72 -      by (simp add: app_subst_list);
   16.73 -    hence "map ($ s) a |- Abs e :: $ s t1 -> $ s t2";
   16.74 -      by (rule has_type.Abs);
   16.75 -    thus "?P a (Abs e) (t1 -> t2)";
   16.76 -      by (simp add: app_subst_list);
   16.77 -  next;
   16.78 -    case App;
   16.79 -    thus "?P a (App e1 e2) t1"; by simp;
   16.80 -  qed;
   16.81 -qed;
   16.82 +lemma has_type_subst_closed: "a |- e :: t ==> $s a |- e :: $s t"
   16.83 +proof -
   16.84 +  assume "a |- e :: t"
   16.85 +  thus ?thesis (is "?P a e t")
   16.86 +  proof (induct (open) ?P a e t)
   16.87 +    case Var
   16.88 +    hence "n < length (map ($ s) a)" by simp
   16.89 +    hence "map ($ s) a |- Var n :: map ($ s) a ! n"
   16.90 +      by (rule has_type.Var)
   16.91 +    also have "map ($ s) a ! n = $ s (a ! n)"
   16.92 +      by (rule nth_map)
   16.93 +    also have "map ($ s) a = $ s a"
   16.94 +      by (simp only: app_subst_list)
   16.95 +    finally show "?P a (Var n) (a ! n)" .
   16.96 +  next
   16.97 +    case Abs
   16.98 +    hence "$ s t1 # map ($ s) a |- e :: $ s t2"
   16.99 +      by (simp add: app_subst_list)
  16.100 +    hence "map ($ s) a |- Abs e :: $ s t1 -> $ s t2"
  16.101 +      by (rule has_type.Abs)
  16.102 +    thus "?P a (Abs e) (t1 -> t2)"
  16.103 +      by (simp add: app_subst_list)
  16.104 +  next
  16.105 +    case App
  16.106 +    thus "?P a (App e1 e2) t1" by simp
  16.107 +  qed
  16.108 +qed
  16.109  
  16.110  
  16.111 -subsection {* Type inference algorithm W *};
  16.112 +subsection {* Type inference algorithm W *}
  16.113  
  16.114  consts
  16.115 -  W :: "[expr, typ list, nat] => (subst * typ * nat) maybe";
  16.116 +  W :: "[expr, typ list, nat] => (subst * typ * nat) maybe"
  16.117  
  16.118  primrec
  16.119    "W (Var i) a n =
  16.120 @@ -86,64 +86,64 @@
  16.121      ((s1, t1, m1) := W e1 a n;
  16.122       (s2, t2, m2) := W e2 ($s1 a) m1;
  16.123       u := mgu ($ s2 t1) (t2 -> TVar m2);
  16.124 -     Ok ($u o $s2 o s1, $u (TVar m2), Suc m2))";
  16.125 +     Ok ($u o $s2 o s1, $u (TVar m2), Suc m2))"
  16.126  
  16.127  
  16.128 -subsection {* Correctness theorem *};
  16.129 +subsection {* Correctness theorem *}
  16.130  
  16.131 -theorem W_correct: "W e a n = Ok (s, t, m) ==> $ s a |- e :: t";
  16.132 -proof -;
  16.133 -  assume W_ok: "W e a n = Ok (s, t, m)";
  16.134 +theorem W_correct: "W e a n = Ok (s, t, m) ==> $ s a |- e :: t"
  16.135 +proof -
  16.136 +  assume W_ok: "W e a n = Ok (s, t, m)"
  16.137    have "ALL a s t m n. Ok (s, t, m) = W e a n --> $ s a |- e :: t"
  16.138 -    (is "?P e");
  16.139 -  proof (induct (stripped) e);
  16.140 -    fix a s t m n;
  16.141 +    (is "?P e")
  16.142 +  proof (induct (stripped) e)
  16.143 +    fix a s t m n
  16.144      {
  16.145 -      fix i;
  16.146 -      assume "Ok (s, t, m) = W (Var i) a n";
  16.147 -      thus "$ s a |- Var i :: t"; by (simp split: if_splits);
  16.148 -    next;
  16.149 -      fix e; assume hyp: "?P e";
  16.150 -      assume "Ok (s, t, m) = W (Abs e) a n";
  16.151 -      then; obtain t' where "t = s n -> t'"
  16.152 -	  and "Ok (s, t', m) = W e (TVar n # a) (Suc n)";
  16.153 -	by (auto split: bind_splits);
  16.154 -      with hyp; show "$ s a |- Abs e :: t";
  16.155 -	by (force intro: has_type.Abs);
  16.156 -    next;
  16.157 -      fix e1 e2; assume hyp1: "?P e1" and hyp2: "?P e2";
  16.158 -      assume "Ok (s, t, m) = W (App e1 e2) a n";
  16.159 -      then; obtain s1 t1 n1 s2 t2 n2 u where
  16.160 +      fix i
  16.161 +      assume "Ok (s, t, m) = W (Var i) a n"
  16.162 +      thus "$ s a |- Var i :: t" by (simp split: if_splits)
  16.163 +    next
  16.164 +      fix e assume hyp: "?P e"
  16.165 +      assume "Ok (s, t, m) = W (Abs e) a n"
  16.166 +      then obtain t' where "t = s n -> t'"
  16.167 +	  and "Ok (s, t', m) = W e (TVar n # a) (Suc n)"
  16.168 +	by (auto split: bind_splits)
  16.169 +      with hyp show "$ s a |- Abs e :: t"
  16.170 +	by (force intro: has_type.Abs)
  16.171 +    next
  16.172 +      fix e1 e2 assume hyp1: "?P e1" and hyp2: "?P e2"
  16.173 +      assume "Ok (s, t, m) = W (App e1 e2) a n"
  16.174 +      then obtain s1 t1 n1 s2 t2 n2 u where
  16.175            s: "s = $ u o $ s2 o s1"
  16.176            and t: "t = u n2"
  16.177            and mgu_ok: "mgu ($ s2 t1) (t2 -> TVar n2) = Ok u"
  16.178            and W1_ok: "W e1 a n = Ok (s1, t1, n1)"
  16.179 -          and W2_ok: "W e2 ($ s1 a) n1 = Ok (s2, t2, n2)";
  16.180 -	by (auto split: bind_splits);
  16.181 -      show "$ s a |- App e1 e2 :: t";
  16.182 -      proof (rule has_type.App);
  16.183 -        from s; have s': "$ u ($ s2 ($ s1 a)) = $s a";
  16.184 -          by (simp add: subst_comp_tel o_def);
  16.185 -        show "$s a |- e1 :: $ u t2 -> t";
  16.186 -        proof -;
  16.187 -          from hyp1 W1_ok [symmetric]; have "$ s1 a |- e1 :: t1";
  16.188 -            by blast;
  16.189 -          hence "$ u ($ s2 ($ s1 a)) |- e1 :: $ u ($ s2 t1)";
  16.190 -            by (intro has_type_subst_closed);
  16.191 -          with s' t mgu_ok; show ?thesis; by simp;
  16.192 -        qed;
  16.193 -        show "$ s a |- e2 :: $ u t2";
  16.194 -        proof -;
  16.195 -          from hyp2 W2_ok [symmetric];
  16.196 -          have "$ s2 ($ s1 a) |- e2 :: t2"; by blast;
  16.197 -          hence "$ u ($ s2 ($ s1 a)) |- e2 :: $ u t2";
  16.198 -            by (rule has_type_subst_closed);
  16.199 -          with s'; show ?thesis; by simp;
  16.200 -        qed;
  16.201 -      qed;
  16.202 -    };
  16.203 -  qed;
  16.204 -  with W_ok [symmetric]; show ?thesis; by blast;
  16.205 -qed;
  16.206 +          and W2_ok: "W e2 ($ s1 a) n1 = Ok (s2, t2, n2)"
  16.207 +	by (auto split: bind_splits)
  16.208 +      show "$ s a |- App e1 e2 :: t"
  16.209 +      proof (rule has_type.App)
  16.210 +        from s have s': "$ u ($ s2 ($ s1 a)) = $s a"
  16.211 +          by (simp add: subst_comp_tel o_def)
  16.212 +        show "$s a |- e1 :: $ u t2 -> t"
  16.213 +        proof -
  16.214 +          from hyp1 W1_ok [symmetric] have "$ s1 a |- e1 :: t1"
  16.215 +            by blast
  16.216 +          hence "$ u ($ s2 ($ s1 a)) |- e1 :: $ u ($ s2 t1)"
  16.217 +            by (intro has_type_subst_closed)
  16.218 +          with s' t mgu_ok show ?thesis by simp
  16.219 +        qed
  16.220 +        show "$ s a |- e2 :: $ u t2"
  16.221 +        proof -
  16.222 +          from hyp2 W2_ok [symmetric]
  16.223 +          have "$ s2 ($ s1 a) |- e2 :: t2" by blast
  16.224 +          hence "$ u ($ s2 ($ s1 a)) |- e2 :: $ u t2"
  16.225 +            by (rule has_type_subst_closed)
  16.226 +          with s' show ?thesis by simp
  16.227 +        qed
  16.228 +      qed
  16.229 +    }
  16.230 +  qed
  16.231 +  with W_ok [symmetric] show ?thesis by blast
  16.232 +qed
  16.233  
  16.234 -end;
  16.235 +end
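
For orientation, here is a small instance of the typing rules above (an illustrative example, not taken from the sources): for an arbitrary type $t$, rule \emph{Var} gives $[t] \vdash \mathit{Var}\;0 :: t$, since $0 < \mathit{length}\,[t]$ and $[t]\,!\,0 = t$, and rule \emph{Abs} then yields
\[
  [] \;\vdash\; \mathit{Abs}\,(\mathit{Var}\;0) :: t \to t .
\]
The correctness theorem states that whenever $W$ succeeds on an expression, the computed substitution and type fit into a derivation of exactly this kind.
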
    17.1 --- a/src/HOL/Real/HahnBanach/HahnBanachExtLemmas.thy	Sun Sep 17 22:15:08 2000 +0200
    17.2 +++ b/src/HOL/Real/HahnBanach/HahnBanachExtLemmas.thy	Sun Sep 17 22:19:02 2000 +0200
    17.3 @@ -3,9 +3,9 @@
    17.4      Author:     Gertrud Bauer, TU Munich
    17.5  *)
    17.6  
    17.7 -header {* Extending non-maximal functions *};
    17.8 +header {* Extending non-maximal functions *}
    17.9  
   17.10 -theory HahnBanachExtLemmas = FunctionNorm:;
   17.11 +theory HahnBanachExtLemmas = FunctionNorm:
   17.12  
   17.13  text{* In this section the following context is presumed.
   17.14  Let $E$ be a real vector space with a 
   17.15 @@ -19,7 +19,7 @@
   17.16  $h'\ap x = h\ap y + a \cdot \xi$ for a certain $\xi$.
   17.17  
   17.18  Subsequently we show some properties of this extension $h'$ of $h$.
   17.19 -*}; 
   17.20 +*} 
   17.21  
   17.22  
   17.23  text {* This lemma will be used to show the existence of a linear
   17.24 @@ -32,211 +32,211 @@
   17.25  it suffices to show that 
   17.26  \begin{matharray}{l} \All
   17.27  {u\in F}{\All {v\in F}{a\ap u \leq b \ap v}} 
   17.28 -\end{matharray} *};
   17.29 +\end{matharray} *}
   17.30  
   17.31  lemma ex_xi: 
   17.32    "[| is_vectorspace F; !! u v. [| u \<in> F; v \<in> F |] ==> a u <= b v |]
   17.33 -  ==> \<exists>xi::real. \<forall>y \<in> F. a y <= xi \<and> xi <= b y"; 
   17.34 -proof -;
   17.35 -  assume vs: "is_vectorspace F";
   17.36 -  assume r: "(!! u v. [| u \<in> F; v \<in> F |] ==> a u <= (b v::real))";
   17.37 +  ==> \<exists>xi::real. \<forall>y \<in> F. a y <= xi \<and> xi <= b y" 
   17.38 +proof -
   17.39 +  assume vs: "is_vectorspace F"
   17.40 +  assume r: "(!! u v. [| u \<in> F; v \<in> F |] ==> a u <= (b v::real))"
   17.41  
    17.42    txt {* From the completeness of the reals it follows that
    17.43    the set $S = \{a\: u\dt\: u\in F\}$ has a supremum, if
   17.44 -  it is non-empty and has an upper bound. *};
   17.45 +  it is non-empty and has an upper bound. *}
   17.46  
   17.47 -  let ?S = "{a u :: real | u. u \<in> F}";
   17.48 +  let ?S = "{a u :: real | u. u \<in> F}"
   17.49  
   17.50 -  have "\<exists>xi. isLub UNIV ?S xi";  
   17.51 -  proof (rule reals_complete);
   17.52 +  have "\<exists>xi. isLub UNIV ?S xi"  
   17.53 +  proof (rule reals_complete)
   17.54    
   17.55 -    txt {* The set $S$ is non-empty, since $a\ap\zero \in S$: *};
   17.56 +    txt {* The set $S$ is non-empty, since $a\ap\zero \in S$: *}
   17.57  
   17.58 -    from vs; have "a 0 \<in> ?S"; by force;
   17.59 -    thus "\<exists>X. X \<in> ?S"; ..;
   17.60 +    from vs have "a 0 \<in> ?S" by force
   17.61 +    thus "\<exists>X. X \<in> ?S" ..
   17.62  
   17.63 -    txt {* $b\ap \zero$ is an upper bound of $S$: *};
   17.64 +    txt {* $b\ap \zero$ is an upper bound of $S$: *}
   17.65  
   17.66 -    show "\<exists>Y. isUb UNIV ?S Y"; 
   17.67 -    proof; 
   17.68 -      show "isUb UNIV ?S (b 0)";
   17.69 -      proof (intro isUbI setleI ballI);
   17.70 -        show "b 0 \<in> UNIV"; ..;
   17.71 -      next;
   17.72 +    show "\<exists>Y. isUb UNIV ?S Y" 
   17.73 +    proof 
   17.74 +      show "isUb UNIV ?S (b 0)"
   17.75 +      proof (intro isUbI setleI ballI)
   17.76 +        show "b 0 \<in> UNIV" ..
   17.77 +      next
   17.78  
   17.79 -        txt {* Every element $y\in S$ is less than $b\ap \zero$: *};
   17.80 +        txt {* Every element $y\in S$ is less than $b\ap \zero$: *}
   17.81  
   17.82 -        fix y; assume y: "y \<in> ?S"; 
   17.83 -        from y; have "\<exists>u \<in> F. y = a u"; by fast;
   17.84 -        thus "y <= b 0"; 
   17.85 -        proof;
   17.86 -          fix u; assume "u \<in> F"; 
   17.87 -          assume "y = a u";
   17.88 -          also; have "a u <= b 0"; by (rule r) (simp!)+;
   17.89 -          finally; show ?thesis; .;
   17.90 -        qed;
   17.91 -      qed;
   17.92 -    qed;
   17.93 -  qed;
   17.94 +        fix y assume y: "y \<in> ?S" 
   17.95 +        from y have "\<exists>u \<in> F. y = a u" by fast
   17.96 +        thus "y <= b 0" 
   17.97 +        proof
   17.98 +          fix u assume "u \<in> F" 
   17.99 +          assume "y = a u"
  17.100 +          also have "a u <= b 0" by (rule r) (simp!)+
  17.101 +          finally show ?thesis .
  17.102 +        qed
  17.103 +      qed
  17.104 +    qed
  17.105 +  qed
  17.106  
  17.107 -  thus "\<exists>xi. \<forall>y \<in> F. a y <= xi \<and> xi <= b y"; 
  17.108 -  proof (elim exE);
  17.109 -    fix xi; assume "isLub UNIV ?S xi"; 
  17.110 -    show ?thesis;
  17.111 -    proof (intro exI conjI ballI); 
  17.112 +  thus "\<exists>xi. \<forall>y \<in> F. a y <= xi \<and> xi <= b y" 
  17.113 +  proof (elim exE)
  17.114 +    fix xi assume "isLub UNIV ?S xi" 
  17.115 +    show ?thesis
  17.116 +    proof (intro exI conjI ballI) 
  17.117     
  17.118 -      txt {* For all $y\in F$ holds $a\ap y \leq \xi$: *};
  17.119 +      txt {* For all $y\in F$ holds $a\ap y \leq \xi$: *}
  17.120       
  17.121 -      fix y; assume y: "y \<in> F";
  17.122 -      show "a y <= xi";    
  17.123 -      proof (rule isUbD);  
  17.124 -        show "isUb UNIV ?S xi"; ..;
  17.125 -      qed (force!);
  17.126 -    next;
  17.127 +      fix y assume y: "y \<in> F"
  17.128 +      show "a y <= xi"    
  17.129 +      proof (rule isUbD)  
  17.130 +        show "isUb UNIV ?S xi" ..
  17.131 +      qed (force!)
  17.132 +    next
  17.133  
  17.134 -      txt {* For all $y\in F$ holds $\xi\leq b\ap y$: *};
  17.135 +      txt {* For all $y\in F$ holds $\xi\leq b\ap y$: *}
  17.136  
  17.137 -      fix y; assume "y \<in> F";
  17.138 -      show "xi <= b y";  
  17.139 -      proof (intro isLub_le_isUb isUbI setleI);
  17.140 -        show "b y \<in> UNIV"; ..;
  17.141 -        show "\<forall>ya \<in> ?S. ya <= b y"; 
  17.142 -        proof;
  17.143 -          fix au; assume au: "au \<in> ?S ";
  17.144 -          hence "\<exists>u \<in> F. au = a u"; by fast;
  17.145 -          thus "au <= b y";
  17.146 -          proof;
  17.147 -            fix u; assume "u \<in> F"; assume "au = a u";  
  17.148 -            also; have "... <= b y"; by (rule r);
  17.149 -            finally; show ?thesis; .;
  17.150 -          qed;
  17.151 -        qed;
  17.152 -      qed; 
  17.153 -    qed;
  17.154 -  qed;
  17.155 -qed;
  17.156 +      fix y assume "y \<in> F"
  17.157 +      show "xi <= b y"  
  17.158 +      proof (intro isLub_le_isUb isUbI setleI)
  17.159 +        show "b y \<in> UNIV" ..
  17.160 +        show "\<forall>ya \<in> ?S. ya <= b y" 
  17.161 +        proof
  17.162 +          fix au assume au: "au \<in> ?S "
  17.163 +          hence "\<exists>u \<in> F. au = a u" by fast
  17.164 +          thus "au <= b y"
  17.165 +          proof
  17.166 +            fix u assume "u \<in> F" assume "au = a u"  
  17.167 +            also have "... <= b y" by (rule r)
  17.168 +            finally show ?thesis .
  17.169 +          qed
  17.170 +        qed
  17.171 +      qed 
  17.172 +    qed
  17.173 +  qed
  17.174 +qed
  17.175  
   17.176  text{* \medskip The function $h'$, defined by
   17.177  $h'\ap x = h\ap y + a\cdot \xi$ where $x = y + a\mult x_0$,
  17.178 -is a linear extension of $h$ to $H'$. *};
  17.179 +is a linear extension of $h$ to $H'$. *}
  17.180  
  17.181  lemma h'_lf: 
  17.182    "[| h' == (\<lambda>x. let (y, a) = SOME (y, a). x = y + a \<cdot> x0 \<and> y \<in> H 
  17.183                  in h y + a * xi);
  17.184    H' == H + lin x0; is_subspace H E; is_linearform H h; x0 \<notin> H; 
  17.185    x0 \<in> E; x0 \<noteq> 0; is_vectorspace E |]
  17.186 -  ==> is_linearform H' h'";
  17.187 -proof -;
  17.188 +  ==> is_linearform H' h'"
  17.189 +proof -
  17.190    assume h'_def: 
  17.191      "h' == (\<lambda>x. let (y, a) = SOME (y, a). x = y + a \<cdot> x0 \<and> y \<in> H 
  17.192                 in h y + a * xi)"
  17.193      and H'_def: "H' == H + lin x0" 
  17.194      and vs: "is_subspace H E" "is_linearform H h" "x0 \<notin> H"
  17.195 -      "x0 \<noteq> 0" "x0 \<in> E" "is_vectorspace E";
  17.196 +      "x0 \<noteq> 0" "x0 \<in> E" "is_vectorspace E"
  17.197  
  17.198 -  have h': "is_vectorspace H'"; 
  17.199 -  proof (unfold H'_def, rule vs_sum_vs);
  17.200 -    show "is_subspace (lin x0) E"; ..;
  17.201 -  qed; 
  17.202 +  have h': "is_vectorspace H'" 
  17.203 +  proof (unfold H'_def, rule vs_sum_vs)
  17.204 +    show "is_subspace (lin x0) E" ..
  17.205 +  qed 
  17.206  
  17.207 -  show ?thesis;
  17.208 -  proof;
  17.209 -    fix x1 x2; assume x1: "x1 \<in> H'" and x2: "x2 \<in> H'"; 
  17.210 +  show ?thesis
  17.211 +  proof
  17.212 +    fix x1 x2 assume x1: "x1 \<in> H'" and x2: "x2 \<in> H'" 
  17.213  
  17.214      txt{* We now have to show that $h'$ is additive, i.~e.\
  17.215      $h' \ap (x_1\plus x_2) = h'\ap x_1 + h'\ap x_2$
   17.216 -    for $x_1, x_2\in H'$. *}; 
   17.217 +    for $x_1, x_2\in H'$. *} 
  17.218  
  17.219 -    have x1x2: "x1 + x2 \<in> H'"; 
  17.220 -      by (rule vs_add_closed, rule h'); 
  17.221 -    from x1; 
  17.222 -    have ex_x1: "\<exists>y1 a1. x1 = y1 + a1 \<cdot> x0  \<and> y1 \<in> H"; 
  17.223 -      by (unfold H'_def vs_sum_def lin_def) fast;
  17.224 -    from x2; 
  17.225 -    have ex_x2: "\<exists>y2 a2. x2 = y2 + a2 \<cdot> x0 \<and> y2 \<in> H"; 
  17.226 -      by (unfold H'_def vs_sum_def lin_def) fast;
  17.227 -    from x1x2; 
  17.228 -    have ex_x1x2: "\<exists>y a. x1 + x2 = y + a \<cdot> x0 \<and> y \<in> H";
  17.229 -      by (unfold H'_def vs_sum_def lin_def) fast;
  17.230 +    have x1x2: "x1 + x2 \<in> H'" 
  17.231 +      by (rule vs_add_closed, rule h') 
  17.232 +    from x1 
  17.233 +    have ex_x1: "\<exists>y1 a1. x1 = y1 + a1 \<cdot> x0  \<and> y1 \<in> H" 
  17.234 +      by (unfold H'_def vs_sum_def lin_def) fast
  17.235 +    from x2 
  17.236 +    have ex_x2: "\<exists>y2 a2. x2 = y2 + a2 \<cdot> x0 \<and> y2 \<in> H" 
  17.237 +      by (unfold H'_def vs_sum_def lin_def) fast
  17.238 +    from x1x2 
  17.239 +    have ex_x1x2: "\<exists>y a. x1 + x2 = y + a \<cdot> x0 \<and> y \<in> H"
  17.240 +      by (unfold H'_def vs_sum_def lin_def) fast
  17.241  
  17.242 -    from ex_x1 ex_x2 ex_x1x2;
  17.243 -    show "h' (x1 + x2) = h' x1 + h' x2";
  17.244 -    proof (elim exE conjE);
  17.245 -      fix y1 y2 y a1 a2 a;
  17.246 +    from ex_x1 ex_x2 ex_x1x2
  17.247 +    show "h' (x1 + x2) = h' x1 + h' x2"
  17.248 +    proof (elim exE conjE)
  17.249 +      fix y1 y2 y a1 a2 a
  17.250        assume y1: "x1 = y1 + a1 \<cdot> x0"     and y1': "y1 \<in> H"
  17.251           and y2: "x2 = y2 + a2 \<cdot> x0"     and y2': "y2 \<in> H" 
  17.252 -         and y: "x1 + x2 = y + a \<cdot> x0"   and y':  "y  \<in> H"; 
  17.253 +         and y: "x1 + x2 = y + a \<cdot> x0"   and y':  "y  \<in> H" 
  17.254        txt {* \label{decomp-H-use}*}
  17.255 -      have ya: "y1 + y2 = y \<and> a1 + a2 = a"; 
  17.256 +      have ya: "y1 + y2 = y \<and> a1 + a2 = a" 
  17.257        proof (rule decomp_H')
  17.258 -        show "y1 + y2 + (a1 + a2) \<cdot> x0 = y + a \<cdot> x0"; 
  17.259 -          by (simp! add: vs_add_mult_distrib2 [of E]);
  17.260 -        show "y1 + y2 \<in> H"; ..;
  17.261 -      qed;
  17.262 +        show "y1 + y2 + (a1 + a2) \<cdot> x0 = y + a \<cdot> x0" 
  17.263 +          by (simp! add: vs_add_mult_distrib2 [of E])
  17.264 +        show "y1 + y2 \<in> H" ..
  17.265 +      qed
  17.266  
  17.267 -      have "h' (x1 + x2) = h y + a * xi";
  17.268 -	by (rule h'_definite);
  17.269 -      also; have "... = h (y1 + y2) + (a1 + a2) * xi"; 
  17.270 -        by (simp add: ya);
  17.271 -      also; from vs y1' y2'; 
  17.272 -      have "... = h y1 + h y2 + a1 * xi + a2 * xi"; 
  17.273 +      have "h' (x1 + x2) = h y + a * xi"
  17.274 +	by (rule h'_definite)
  17.275 +      also have "... = h (y1 + y2) + (a1 + a2) * xi" 
  17.276 +        by (simp add: ya)
  17.277 +      also from vs y1' y2' 
  17.278 +      have "... = h y1 + h y2 + a1 * xi + a2 * xi" 
  17.279  	by (simp add: linearform_add [of H] 
  17.280 -                      real_add_mult_distrib);
  17.281 -      also; have "... = (h y1 + a1 * xi) + (h y2 + a2 * xi)"; 
  17.282 -        by simp;
  17.283 -      also; have "h y1 + a1 * xi = h' x1";
  17.284 -        by (rule h'_definite [symmetric]);
  17.285 -      also; have "h y2 + a2 * xi = h' x2"; 
  17.286 -        by (rule h'_definite [symmetric]);
  17.287 -      finally; show ?thesis; .;
  17.288 -    qed;
  17.289 +                      real_add_mult_distrib)
  17.290 +      also have "... = (h y1 + a1 * xi) + (h y2 + a2 * xi)" 
  17.291 +        by simp
  17.292 +      also have "h y1 + a1 * xi = h' x1"
  17.293 +        by (rule h'_definite [symmetric])
  17.294 +      also have "h y2 + a2 * xi = h' x2" 
  17.295 +        by (rule h'_definite [symmetric])
  17.296 +      finally show ?thesis .
  17.297 +    qed
  17.298   
  17.299      txt{* We further have to show that $h'$ is multiplicative, 
  17.300      i.~e.\ $h'\ap (c \mult x_1) = c \cdot h'\ap x_1$
   17.301      for $x_1\in H'$ and $c\in \bbbR$. 
  17.302 -    *}; 
  17.303 +    *} 
  17.304  
  17.305 -  next;  
  17.306 -    fix c x1; assume x1: "x1 \<in> H'";    
  17.307 -    have ax1: "c \<cdot> x1 \<in> H'";
  17.308 -      by (rule vs_mult_closed, rule h');
  17.309 -    from x1; 
  17.310 -    have ex_x: "!! x. x\<in> H' ==> \<exists>y a. x = y + a \<cdot> x0 \<and> y \<in> H";
  17.311 -      by (unfold H'_def vs_sum_def lin_def) fast;
  17.312 +  next  
  17.313 +    fix c x1 assume x1: "x1 \<in> H'"    
  17.314 +    have ax1: "c \<cdot> x1 \<in> H'"
  17.315 +      by (rule vs_mult_closed, rule h')
  17.316 +    from x1 
  17.317 +    have ex_x: "!! x. x\<in> H' ==> \<exists>y a. x = y + a \<cdot> x0 \<and> y \<in> H"
  17.318 +      by (unfold H'_def vs_sum_def lin_def) fast
  17.319  
  17.320 -    from x1; have ex_x1: "\<exists>y1 a1. x1 = y1 + a1 \<cdot> x0 \<and> y1 \<in> H";
  17.321 -      by (unfold H'_def vs_sum_def lin_def) fast;
  17.322 -    with ex_x [of "c \<cdot> x1", OF ax1];
  17.323 -    show "h' (c \<cdot> x1) = c * (h' x1)";  
  17.324 -    proof (elim exE conjE);
  17.325 -      fix y1 y a1 a; 
  17.326 +    from x1 have ex_x1: "\<exists>y1 a1. x1 = y1 + a1 \<cdot> x0 \<and> y1 \<in> H"
  17.327 +      by (unfold H'_def vs_sum_def lin_def) fast
  17.328 +    with ex_x [of "c \<cdot> x1", OF ax1]
  17.329 +    show "h' (c \<cdot> x1) = c * (h' x1)"  
  17.330 +    proof (elim exE conjE)
  17.331 +      fix y1 y a1 a 
  17.332        assume y1: "x1 = y1 + a1 \<cdot> x0"     and y1': "y1 \<in> H"
  17.333 -        and y: "c \<cdot> x1 = y  + a \<cdot> x0"    and y': "y \<in> H"; 
  17.334 +        and y: "c \<cdot> x1 = y  + a \<cdot> x0"    and y': "y \<in> H" 
  17.335  
  17.336 -      have ya: "c \<cdot> y1 = y \<and> c * a1 = a"; 
  17.337 -      proof (rule decomp_H'); 
  17.338 -	show "c \<cdot> y1 + (c * a1) \<cdot> x0 = y + a \<cdot> x0"; 
  17.339 -          by (simp! add: vs_add_mult_distrib1);
  17.340 -        show "c \<cdot> y1 \<in> H"; ..;
  17.341 -      qed;
  17.342 +      have ya: "c \<cdot> y1 = y \<and> c * a1 = a" 
  17.343 +      proof (rule decomp_H') 
  17.344 +	show "c \<cdot> y1 + (c * a1) \<cdot> x0 = y + a \<cdot> x0" 
  17.345 +          by (simp! add: vs_add_mult_distrib1)
  17.346 +        show "c \<cdot> y1 \<in> H" ..
  17.347 +      qed
  17.348  
  17.349 -      have "h' (c \<cdot> x1) = h y + a * xi"; 
  17.350 -	by (rule h'_definite);
  17.351 -      also; have "... = h (c \<cdot> y1) + (c * a1) * xi";
  17.352 -        by (simp add: ya);
  17.353 -      also; from vs y1'; have "... = c * h y1 + c * a1 * xi"; 
  17.354 -	by (simp add: linearform_mult [of H]);
  17.355 -      also; from vs y1'; have "... = c * (h y1 + a1 * xi)"; 
  17.356 -	by (simp add: real_add_mult_distrib2 real_mult_assoc);
  17.357 -      also; have "h y1 + a1 * xi = h' x1"; 
  17.358 -        by (rule h'_definite [symmetric]);
  17.359 -      finally; show ?thesis; .;
  17.360 -    qed;
  17.361 -  qed;
  17.362 -qed;
  17.363 +      have "h' (c \<cdot> x1) = h y + a * xi" 
  17.364 +	by (rule h'_definite)
  17.365 +      also have "... = h (c \<cdot> y1) + (c * a1) * xi"
  17.366 +        by (simp add: ya)
  17.367 +      also from vs y1' have "... = c * h y1 + c * a1 * xi" 
  17.368 +	by (simp add: linearform_mult [of H])
  17.369 +      also from vs y1' have "... = c * (h y1 + a1 * xi)" 
  17.370 +	by (simp add: real_add_mult_distrib2 real_mult_assoc)
  17.371 +      also have "h y1 + a1 * xi = h' x1" 
  17.372 +        by (rule h'_definite [symmetric])
  17.373 +      finally show ?thesis .
  17.374 +    qed
  17.375 +  qed
  17.376 +qed
  17.377  
  17.378  text{* \medskip The linear extension $h'$ of $h$
  17.379 -is bounded by the seminorm $p$. *};
  17.380 +is bounded by the seminorm $p$. *}
  17.381  
  17.382  lemma h'_norm_pres:
  17.383    "[| h' == (\<lambda>x. let (y, a) = SOME (y, a). x = y + a \<cdot> x0 \<and> y \<in> H 
  17.384 @@ -245,100 +245,100 @@
  17.385    is_subspace H E; is_seminorm E p; is_linearform H h; 
  17.386    \<forall>y \<in> H. h y <= p y; 
  17.387    \<forall>y \<in> H. - p (y + x0) - h y <= xi \<and> xi <= p (y + x0) - h y |]
  17.388 -   ==> \<forall>x \<in> H'. h' x <= p x"; 
  17.389 -proof; 
  17.390 +   ==> \<forall>x \<in> H'. h' x <= p x" 
  17.391 +proof 
  17.392    assume h'_def: 
  17.393      "h' == (\<lambda>x. let (y, a) = SOME (y, a). x = y + a \<cdot> x0 \<and> y \<in> H 
  17.394                 in (h y) + a * xi)"
  17.395      and H'_def: "H' == H + lin x0" 
  17.396      and vs: "x0 \<notin> H" "x0 \<in> E" "x0 \<noteq> 0" "is_vectorspace E" 
  17.397              "is_subspace H E" "is_seminorm E p" "is_linearform H h" 
  17.398 -    and a: "\<forall>y \<in> H. h y <= p y";
  17.399 -  presume a1: "\<forall>ya \<in> H. - p (ya + x0) - h ya <= xi";
  17.400 -  presume a2: "\<forall>ya \<in> H. xi <= p (ya + x0) - h ya";
  17.401 -  fix x; assume "x \<in> H'"; 
  17.402 +    and a: "\<forall>y \<in> H. h y <= p y"
  17.403 +  presume a1: "\<forall>ya \<in> H. - p (ya + x0) - h ya <= xi"
  17.404 +  presume a2: "\<forall>ya \<in> H. xi <= p (ya + x0) - h ya"
  17.405 +  fix x assume "x \<in> H'" 
  17.406    have ex_x: 
  17.407 -    "!! x. x \<in> H' ==> \<exists>y a. x = y + a \<cdot> x0 \<and> y \<in> H";
  17.408 -    by (unfold H'_def vs_sum_def lin_def) fast;
  17.409 -  have "\<exists>y a. x = y + a \<cdot> x0 \<and> y \<in> H";
  17.410 -    by (rule ex_x);
  17.411 -  thus "h' x <= p x";
  17.412 -  proof (elim exE conjE);
  17.413 -    fix y a; assume x: "x = y + a \<cdot> x0" and y: "y \<in> H";
  17.414 -    have "h' x = h y + a * xi";
  17.415 -      by (rule h'_definite);
  17.416 +    "!! x. x \<in> H' ==> \<exists>y a. x = y + a \<cdot> x0 \<and> y \<in> H"
  17.417 +    by (unfold H'_def vs_sum_def lin_def) fast
  17.418 +  have "\<exists>y a. x = y + a \<cdot> x0 \<and> y \<in> H"
  17.419 +    by (rule ex_x)
  17.420 +  thus "h' x <= p x"
  17.421 +  proof (elim exE conjE)
  17.422 +    fix y a assume x: "x = y + a \<cdot> x0" and y: "y \<in> H"
  17.423 +    have "h' x = h y + a * xi"
  17.424 +      by (rule h'_definite)
  17.425  
  17.426      txt{* Now we show  
  17.427      $h\ap y + a \cdot \xi\leq  p\ap (y\plus a \mult x_0)$ 
  17.428 -    by case analysis on $a$. *};
  17.429 +    by case analysis on $a$. *}
  17.430  
  17.431 -    also; have "... <= p (y + a \<cdot> x0)";
  17.432 -    proof (rule linorder_cases);
  17.433 +    also have "... <= p (y + a \<cdot> x0)"
  17.434 +    proof (rule linorder_cases)
  17.435  
  17.436 -      assume z: "a = #0"; 
  17.437 -      with vs y a; show ?thesis; by simp;
  17.438 +      assume z: "a = #0" 
  17.439 +      with vs y a show ?thesis by simp
  17.440  
  17.441      txt {* In the case $a < 0$, we use $a_1$ with $\idt{ya}$ 
  17.442 -    taken as $y/a$: *};
  17.443 +    taken as $y/a$: *}
  17.444  
  17.445 -    next;
  17.446 -      assume lz: "a < #0"; hence nz: "a \<noteq> #0"; by simp;
  17.447 -      from a1; 
  17.448 -      have "- p (rinv a \<cdot> y + x0) - h (rinv a \<cdot> y) <= xi";
  17.449 -        by (rule bspec) (simp!);
  17.450 +    next
  17.451 +      assume lz: "a < #0" hence nz: "a \<noteq> #0" by simp
  17.452 +      from a1 
  17.453 +      have "- p (rinv a \<cdot> y + x0) - h (rinv a \<cdot> y) <= xi"
  17.454 +        by (rule bspec) (simp!)
  17.455  
  17.456        txt {* The thesis for this case now follows by a short  
  17.457 -      calculation. *};      
  17.458 -      hence "a * xi <= a * (- p (rinv a \<cdot> y + x0) - h (rinv a \<cdot> y))";
  17.459 -        by (rule real_mult_less_le_anti [OF lz]);
  17.460 -      also; 
  17.461 -      have "... = - a * (p (rinv a \<cdot> y + x0)) - a * (h (rinv a \<cdot> y))";
  17.462 -        by (rule real_mult_diff_distrib);
  17.463 -      also; from lz vs y; 
  17.464 -      have "- a * (p (rinv a \<cdot> y + x0)) = p (a \<cdot> (rinv a \<cdot> y + x0))";
  17.465 -        by (simp add: seminorm_abs_homogenous abs_minus_eqI2);
  17.466 -      also; from nz vs y; have "... = p (y + a \<cdot> x0)";
  17.467 -        by (simp add: vs_add_mult_distrib1);
  17.468 -      also; from nz vs y; have "a * (h (rinv a \<cdot> y)) =  h y";
  17.469 -        by (simp add: linearform_mult [symmetric]);
  17.470 -      finally; have "a * xi <= p (y + a \<cdot> x0) - h y"; .;
  17.471 +      calculation. *}      
  17.472 +      hence "a * xi <= a * (- p (rinv a \<cdot> y + x0) - h (rinv a \<cdot> y))"
  17.473 +        by (rule real_mult_less_le_anti [OF lz])
  17.474 +      also 
  17.475 +      have "... = - a * (p (rinv a \<cdot> y + x0)) - a * (h (rinv a \<cdot> y))"
  17.476 +        by (rule real_mult_diff_distrib)
  17.477 +      also from lz vs y 
  17.478 +      have "- a * (p (rinv a \<cdot> y + x0)) = p (a \<cdot> (rinv a \<cdot> y + x0))"
  17.479 +        by (simp add: seminorm_abs_homogenous abs_minus_eqI2)
  17.480 +      also from nz vs y have "... = p (y + a \<cdot> x0)"
  17.481 +        by (simp add: vs_add_mult_distrib1)
  17.482 +      also from nz vs y have "a * (h (rinv a \<cdot> y)) =  h y"
  17.483 +        by (simp add: linearform_mult [symmetric])
  17.484 +      finally have "a * xi <= p (y + a \<cdot> x0) - h y" .
  17.485  
  17.486 -      hence "h y + a * xi <= h y + p (y + a \<cdot> x0) - h y";
  17.487 -        by (simp add: real_add_left_cancel_le);
  17.488 -      thus ?thesis; by simp;
  17.489 +      hence "h y + a * xi <= h y + p (y + a \<cdot> x0) - h y"
  17.490 +        by (simp add: real_add_left_cancel_le)
  17.491 +      thus ?thesis by simp
  17.492  
  17.493        txt {* In the case $a > 0$, we use $a_2$ with $\idt{ya}$ 
  17.494 -      taken as $y/a$: *};
  17.495 +      taken as $y/a$: *}
  17.496  
  17.497 -    next; 
  17.498 -      assume gz: "#0 < a"; hence nz: "a \<noteq> #0"; by simp;
  17.499 -      from a2; have "xi <= p (rinv a \<cdot> y + x0) - h (rinv a \<cdot> y)";
  17.500 -        by (rule bspec) (simp!);
  17.501 +    next 
  17.502 +      assume gz: "#0 < a" hence nz: "a \<noteq> #0" by simp
  17.503 +      from a2 have "xi <= p (rinv a \<cdot> y + x0) - h (rinv a \<cdot> y)"
  17.504 +        by (rule bspec) (simp!)
  17.505  
  17.506        txt {* The thesis for this case follows by a short
  17.507 -      calculation: *};
  17.508 +      calculation: *}
  17.509  
  17.510 -      with gz; 
  17.511 -      have "a * xi <= a * (p (rinv a \<cdot> y + x0) - h (rinv a \<cdot> y))";
  17.512 -        by (rule real_mult_less_le_mono);
  17.513 -      also; have "... = a * p (rinv a \<cdot> y + x0) - a * h (rinv a \<cdot> y)";
  17.514 -        by (rule real_mult_diff_distrib2); 
  17.515 -      also; from gz vs y; 
  17.516 -      have "a * p (rinv a \<cdot> y + x0) = p (a \<cdot> (rinv a \<cdot> y + x0))";
  17.517 -        by (simp add: seminorm_abs_homogenous abs_eqI2);
  17.518 -      also; from nz vs y; have "... = p (y + a \<cdot> x0)";
  17.519 -        by (simp add: vs_add_mult_distrib1);
  17.520 -      also; from nz vs y; have "a * h (rinv a \<cdot> y) = h y";
  17.521 -        by (simp add: linearform_mult [symmetric]); 
  17.522 -      finally; have "a * xi <= p (y + a \<cdot> x0) - h y"; .;
  17.523 +      with gz 
  17.524 +      have "a * xi <= a * (p (rinv a \<cdot> y + x0) - h (rinv a \<cdot> y))"
  17.525 +        by (rule real_mult_less_le_mono)
  17.526 +      also have "... = a * p (rinv a \<cdot> y + x0) - a * h (rinv a \<cdot> y)"
  17.527 +        by (rule real_mult_diff_distrib2) 
  17.528 +      also from gz vs y 
  17.529 +      have "a * p (rinv a \<cdot> y + x0) = p (a \<cdot> (rinv a \<cdot> y + x0))"
  17.530 +        by (simp add: seminorm_abs_homogenous abs_eqI2)
  17.531 +      also from nz vs y have "... = p (y + a \<cdot> x0)"
  17.532 +        by (simp add: vs_add_mult_distrib1)
  17.533 +      also from nz vs y have "a * h (rinv a \<cdot> y) = h y"
  17.534 +        by (simp add: linearform_mult [symmetric]) 
  17.535 +      finally have "a * xi <= p (y + a \<cdot> x0) - h y" .
  17.536   
  17.537 -      hence "h y + a * xi <= h y + (p (y + a \<cdot> x0) - h y)";
  17.538 -        by (simp add: real_add_left_cancel_le);
  17.539 -      thus ?thesis; by simp;
  17.540 -    qed;
  17.541 -    also; from x; have "... = p x"; by simp;
  17.542 -    finally; show ?thesis; .;
  17.543 -  qed;
  17.544 -qed blast+; 
  17.545 +      hence "h y + a * xi <= h y + (p (y + a \<cdot> x0) - h y)"
  17.546 +        by (simp add: real_add_left_cancel_le)
  17.547 +      thus ?thesis by simp
  17.548 +    qed
  17.549 +    also from x have "... = p x" by simp
  17.550 +    finally show ?thesis .
  17.551 +  qed
  17.552 +qed blast+ 
  17.553  
  17.554 -end;
  17.555 +end
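
In summary, the additivity part of \texttt{h'\_lf} above rests on the uniqueness of the decomposition along $x_0$: if $x_1 = y_1 + a_1 \cdot x_0$ and $x_2 = y_2 + a_2 \cdot x_0$ with $y_1, y_2 \in H$, then \texttt{decomp\_H'} identifies the decomposition of the sum as $(y_1 + y_2) + (a_1 + a_2) \cdot x_0$, and linearity of $h$ gives
\[
  h'(x_1 + x_2) \;=\; h(y_1 + y_2) + (a_1 + a_2)\,\xi
  \;=\; (h\,y_1 + a_1\,\xi) + (h\,y_2 + a_2\,\xi)
  \;=\; h'\,x_1 + h'\,x_2 ,
\]
with the multiplicative case following the same scheme.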